Instruction stringlengths 13 145k | input_code stringlengths 35 390k | output_code stringlengths 35 390k |
|---|---|---|
Set `nomad_job_id` for compendia jobs
### Context
I was trying to look up a compendia processor job for which I only had the `nomad_job_id`.
### Problem or idea
Looks like we don't set the `nomad_job_id` for the compendia processor jobs
```sql
$ select * from processor_jobs where pipeline_applied='COMPENDIA' and start_time > now() - '48 hours'::interval;
id | pipeline_applied | no_retry | ram_amount | volume_index | start_time | end_time | success | nomad_job_id | num_retries | retried | worker_id | worker_version | failure_reason | created_at
| last_modified | retried_job_id
----------+------------------+----------+------------+--------------+-------------------------------+-------------------------------+---------+--------------+-------------+---------+---------------------+-----------------+---------------------------------------------------------------------------------------------------------------------------------+-----------------------------
--+-------------------------------+----------------
7723013 | COMPENDIA | f | 2048 | | 2019-09-26 19:06:39.774994+00 | 2019-09-27 00:19:36.860523+00 | f | | 0 | f | i-0aad2ec47c070fad1 | v1.27.14-hotfix | Failed to upload computed file. | 2019-09-26 19:06:39.708193+0
0 | 2019-09-27 00:19:36.860547+00 |
7540834 | COMPENDIA | f | 2048 | | 2019-09-26 17:21:04.618606+00 | | | | 0 | f | i-0aad2ec47c070fad1 | v1.27.14-hotfix | | 2019-09-26 17:21:03.267152+0
0 | 2019-09-26 17:21:04.618685+00 |
7540448 | COMPENDIA | f | 2048 | | 2019-09-26 17:20:57.091504+00 | | | | 0 | f | i-0aad2ec47c070fad1 | v1.27.14-hotfix | | 2019-09-26 17:20:55.186121+0
0 | 2019-09-26 17:20:57.091538+00 |
```
### Solution or next step
The `nomad_job_id` should have a value, similar to other jobs that we have.
| common/data_refinery_common/utils.py
<|code_start|>import csv
import hashlib
import io
import os
import re
from functools import partial
from itertools import groupby
from typing import Dict, Set
from urllib.parse import urlparse
import nomad
import requests
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from retrying import retry
from data_refinery_common.performant_pagination.pagination import PerformantPaginator
# Found: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
METADATA_URL = "http://169.254.169.254/latest/meta-data"
INSTANCE_ID = None
SUPPORTED_MICROARRAY_PLATFORMS = None
SUPPORTED_RNASEQ_PLATFORMS = None
READABLE_PLATFORM_NAMES = None
def get_env_variable(var_name: str, default: str=None) -> str:
    """Return the value of environment variable `var_name`.

    Falls back to `default` when the variable is unset; raises
    ImproperlyConfigured when it is unset and no default was provided.
    """
    try:
        return os.environ[var_name]
    except KeyError:
        # Compare against None explicitly so that a falsy-but-valid
        # default such as "" is honored instead of raising.
        if default is not None:
            return default
        error_msg = "Set the %s environment variable" % var_name
        raise ImproperlyConfigured(error_msg)
def get_env_variable_gracefully(var_name: str, default: str=None) -> str:
    """Look up an environment variable without ever raising.

    Unlike get_env_variable, a missing variable never results in an
    ImproperlyConfigured error; `default` (possibly None) is returned
    instead.
    """
    return os.environ.get(var_name, default)
def get_instance_id() -> str:
    """Returns the AWS instance id where this is running or "local".

    The value is resolved once per process and memoized in the
    module-level INSTANCE_ID; the EC2 metadata endpoint is only
    consulted when running in the cloud.
    """
    global INSTANCE_ID
    if INSTANCE_ID is None:
        if settings.RUNNING_IN_CLOUD:
            # The metadata service can be flaky, so retry up to 3 times.
            @retry(stop_max_attempt_number=3)
            def retrieve_instance_id():
                return requests.get(os.path.join(METADATA_URL, "instance-id")).text
            INSTANCE_ID = retrieve_instance_id()
        else:
            INSTANCE_ID = "local"
    return INSTANCE_ID
def get_worker_id() -> str:
    """Returns <instance_id>/<process name> identifying this worker."""
    # Bug fix: `current_process` was referenced without ever being
    # imported in this module, so calling this function raised
    # NameError. Import it locally to keep the fix self-contained.
    from multiprocessing import current_process
    return get_instance_id() + "/" + current_process().name
def get_volume_index(path='/home/user/data_store/VOLUME_INDEX') -> str:
    """ Reads the contents of the VOLUME_INDEX file, else returns default.

    The default is "-1" when running in the cloud (no volume assigned)
    and "0" locally. Any failure to read the file — missing file,
    permissions, etc. — falls back to the default.
    """
    if settings.RUNNING_IN_CLOUD:
        default = "-1"
    else:
        default = "0"
    try:
        with open(path, 'r') as f:
            v_id = f.read().strip()
            return v_id
    except Exception as e:
        # Our configured logger needs util, so we use the standard logging library for just this.
        import logging
        logger = logging.getLogger(__name__)
        logger.info(str(e))
        logger.info("Could not read volume index file, using default: " + str(default))
        return default
def get_nomad_jobs() -> list:
    """Calls nomad service and return all jobs.

    Connection details come from the NOMAD_HOST / NOMAD_PORT environment
    variables (port defaults to 4646). Returns an empty list when the
    Nomad agent cannot be reached.
    """
    try:
        nomad_host = get_env_variable("NOMAD_HOST")
        nomad_port = get_env_variable("NOMAD_PORT", "4646")
        nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
        return nomad_client.jobs.get_jobs()
    except nomad.api.exceptions.BaseNomadException:
        # Nomad is not available right now
        return []
def get_active_volumes() -> Set[str]:
    """Returns a Set of indices for volumes that are currently mounted.
    These can be used to determine which jobs would actually be able
    to be placed if they were queued up.

    Raises ImproperlyConfigured if NOMAD_HOST is unset; returns the
    empty set when Nomad itself cannot be reached.
    """
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
    volumes = set()
    try:
        for node in nomad_client.nodes.get_nodes():
            node_detail = nomad_client.node.get_node(node["ID"])
            # A node contributes its volume index only when it is ready
            # and actually advertises one in its metadata.
            if 'Status' in node_detail and node_detail['Status'] == 'ready' \
                    and 'Meta' in node_detail and 'volume_index' in node_detail['Meta']:
                volumes.add(node_detail['Meta']['volume_index'])
    except nomad.api.exceptions.BaseNomadException:
        # Nomad is down, return the empty set.
        pass
    return volumes
def get_active_volumes_detailed() -> Dict:
    """Returns the instance type and number of allocations (jobs) for each active volume.
    These can be used to determine which jobs would actually be able
    to be placed if they were queued up.

    Returns a dict mapping volume index -> {"type": <EC2 instance type
    or None>, "allocations": <allocation count>}. Raises
    ImproperlyConfigured if NOMAD_HOST is unset; returns an empty dict
    when Nomad cannot be reached.
    """
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
    volumes = dict()
    try:
        for node in nomad_client.nodes.get_nodes():
            node_detail = nomad_client.node.get_node(node["ID"])
            allocations = len(nomad_client.node.get_allocations(node["ID"]))
            # Only ready nodes that advertise a volume index are counted.
            if 'Status' in node_detail and node_detail['Status'] == 'ready' \
                    and 'Meta' in node_detail and 'volume_index' in node_detail['Meta']:
                volume_info = dict()
                volume_info["type"] = node_detail['Attributes'].get("platform.aws.instance-type",
                                                                    None)
                volume_info["allocations"] = allocations
                volumes[node_detail['Meta']['volume_index']] = volume_info
    except nomad.api.exceptions.BaseNomadException:
        # Nomad is down, return the empty dict.
        pass
    return volumes
def get_supported_microarray_platforms(platforms_csv: str="config/supported_microarray_platforms.csv"
                                       ) -> list:
    """
    Loads our supported microarray platforms file and returns a list of dictionaries
    containing the internal accession, the external accession, and a boolean indicating
    whether or not the platform supports brainarray.
    CSV must be in the format:
    Internal Accession | External Accession | Supports Brainarray

    The result is memoized in the module-level
    SUPPORTED_MICROARRAY_PLATFORMS and read from disk only once.
    """
    global SUPPORTED_MICROARRAY_PLATFORMS
    if SUPPORTED_MICROARRAY_PLATFORMS is not None:
        return SUPPORTED_MICROARRAY_PLATFORMS
    SUPPORTED_MICROARRAY_PLATFORMS = []
    with open(platforms_csv) as platforms_file:
        reader = csv.reader(platforms_file)
        for line in reader:
            # Skip the header row. csv line numbers are 1 indexed, #BecauseCSV.
            # Bug fix: use `==`, not `is` — identity comparison of ints is a
            # CPython implementation detail (SyntaxWarning on Python 3.8+).
            if reader.line_num == 1:
                continue
            external_accession = line[1]
            is_brainarray = line[2] == 'y'
            SUPPORTED_MICROARRAY_PLATFORMS.append({"platform_accession": line[0],
                                                   "external_accession": external_accession,
                                                   "is_brainarray": is_brainarray})
            # A-GEOD-13158 is the same platform as GPL13158 and this
            # pattern is generalizable. Since we don't want to have to
            # list a lot of platforms twice just with different prefixes,
            # we just convert them and add them to the list.
            if external_accession.startswith("A-GEOD"):
                converted_accession = external_accession.replace("A-GEOD-", "GPL")
                SUPPORTED_MICROARRAY_PLATFORMS.append({"platform_accession": line[0],
                                                       "external_accession": converted_accession,
                                                       "is_brainarray": is_brainarray})
            # Our list of supported platforms contains both A-GEOD-*
            # and GPL*, so convert both ways.
            if external_accession.startswith("GPL"):
                converted_accession = external_accession.replace("GPL", "A-GEOD-")
                SUPPORTED_MICROARRAY_PLATFORMS.append({"platform_accession": line[0],
                                                       "external_accession": converted_accession,
                                                       "is_brainarray": is_brainarray})
    return SUPPORTED_MICROARRAY_PLATFORMS
def get_supported_rnaseq_platforms(platforms_list: str="config/supported_rnaseq_platforms.txt"
                                   ) -> list:
    """Return the list of currently supported RNASeq platform names.

    The platforms file is read once per process; subsequent calls return
    the cached module-level list.
    """
    global SUPPORTED_RNASEQ_PLATFORMS
    if SUPPORTED_RNASEQ_PLATFORMS is None:
        with open(platforms_list) as platforms_file:
            SUPPORTED_RNASEQ_PLATFORMS = [row.strip() for row in platforms_file]
    return SUPPORTED_RNASEQ_PLATFORMS
def get_readable_affymetrix_names(mapping_csv: str="config/readable_affymetrix_names.csv") -> Dict:
    """
    Loads the mapping from human readable names to internal accessions for Affymetrix platforms.
    CSV must be in the format:
    Readable Name | Internal Accession
    Returns a dictionary mapping from internal accessions to human readable names.

    The result is memoized in the module-level READABLE_PLATFORM_NAMES
    and read from disk only once.
    """
    global READABLE_PLATFORM_NAMES
    if READABLE_PLATFORM_NAMES is not None:
        return READABLE_PLATFORM_NAMES
    READABLE_PLATFORM_NAMES = {}
    with open(mapping_csv, encoding='utf-8') as mapping_file:
        reader = csv.reader(mapping_file)
        for line in reader:
            # Skip the header row. csv line numbers are 1 indexed, #BecauseCSV.
            # Bug fix: `==` rather than `is` — int identity comparison is a
            # CPython implementation detail (SyntaxWarning on Python 3.8+).
            if reader.line_num == 1:
                continue
            READABLE_PLATFORM_NAMES[line[1]] = line[0]
    return READABLE_PLATFORM_NAMES
def get_internal_microarray_accession(accession_code):
    """Map an external or internal accession code to the internal accession.

    Returns None when the code matches no supported microarray platform.
    """
    for entry in get_supported_microarray_platforms():
        if accession_code in (entry['external_accession'], entry['platform_accession']):
            return entry['platform_accession']
    return None
def get_normalized_platform(external_accession):
    """Normalize accessions of the form ``hugene10stv1`` to ``hugene10st``.

    Only a trailing ``stv<digit>`` suffix triggers the rewrite; any other
    accession is returned unchanged.
    """
    suffix_match = re.search(r"stv\d$", external_accession)
    if suffix_match:
        external_accession = external_accession.replace(suffix_match.group(0), 'st')
    return external_accession
def parse_s3_url(url):
    """Split an S3 URL into (bucket, key).

    The bucket is the URL's network location and the key is the path with
    surrounding slashes removed. Both are empty strings for a falsy URL.
    """
    if not url:
        return '', ''
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip('/')
def get_s3_url(s3_bucket: str, s3_key: str) -> str:
    """Build the public S3 URL for `s3_key` inside `s3_bucket`."""
    return "{}.s3.amazonaws.com/{}".format(s3_bucket, s3_key)
def calculate_file_size(absolute_file_path):
    """Return the size in bytes of the file at `absolute_file_path`."""
    return os.stat(absolute_file_path).st_size
def calculate_sha1(absolute_file_path):
    """Compute the SHA-1 hex digest of a file, reading it in buffered chunks."""
    digest = hashlib.sha1()
    with open(absolute_file_path, mode='rb') as open_file:
        while True:
            chunk = open_file.read(io.DEFAULT_BUFFER_SIZE)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def get_sra_download_url(run_accession, protocol="fasp"):
    """Try getting the sra-download URL from the NCBI names CGI endpoint.

    Returns the URL string (including its protocol prefix) or None when
    the request fails, the endpoint returns a non-200 status, or the
    response cannot be parsed.
    """
    # Ex: curl --data "acc=SRR6718414&accept-proto=fasp&version=2.0" https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi
    cgi_url = "https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi"
    data = "acc=" + run_accession + "&accept-proto=" + protocol + "&version=2.0"
    try:
        resp = requests.post(cgi_url, data=data)
    except Exception:
        # Our configured logger needs util, so we use the standard logging library for just this.
        import logging
        logger = logging.getLogger(__name__)
        logger.exception("Bad CGI request!: " + str(cgi_url) + ", " + str(data))
        return None
    if resp.status_code != 200:
        # This isn't on the new servers
        return None
    try:
        # From: '#2.0\nsrapub|DRR002116|2324796808|...|fasp://dbtest@sra-download.ncbi.nlm.nih.gov:data/sracloud/traces/dra0/DRR/000002/DRR002116|200|ok\n'
        # To: the 7th pipe-delimited field of the second line, which holds the URL.
        # Sometimes the responses from names.cgi make no sense at all on a
        # per-accession-code basis; the except below handles that. e.g.:
        # $ curl --data "acc=SRR5818019&accept-proto=fasp&version=2.0" https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi
        # 2.0\nremote|SRR5818019|434259775|2017-07-11T21:32:08Z|a4bfc16dbab1d4f729c4552e3c9519d1|||400|Only 'https' protocol is allowed for this object
        # (Removed the unused `protocol_header` local and `as e` bindings.)
        return resp.text.split('\n')[1].split('|')[6]
    except Exception:
        # Our configured logger needs util, so we use the standard logging library for just this.
        import logging
        logger = logging.getLogger(__name__)
        logger.exception("Error parsing CGI response: " + str(cgi_url) + " " + str(data) + " " + str(resp.text))
        return None
def get_fasp_sra_download(run_accession: str):
    """Get a FASP location for an SRA accession, with the protocol stripped.

    These URLs should not actually include the protocol; returns None
    when the CGI endpoint could not provide a URL.
    """
    full_url = get_sra_download_url(run_accession, 'fasp')
    if not full_url:
        return None
    return full_url.split('fasp://')[1]
def get_https_sra_download(run_accession: str):
    """Get an HTTPS URL for SRA.

    Thin wrapper over get_sra_download_url with the 'https' protocol;
    returns None when the endpoint cannot provide a URL.
    """
    return get_sra_download_url(run_accession, 'https')
def load_blacklist(blacklist_csv: str = "config/RNASeqRunBlackList.csv"):
    """ Loads the SRA run blacklist.

    Returns a list of blacklisted accession codes, taken from the first
    column of each non-header row of the CSV, stripped of whitespace.
    """
    blacklisted_samples = []
    with open(blacklist_csv, encoding='utf-8') as blacklist_file:
        reader = csv.reader(blacklist_file)
        for line in reader:
            # Skip the header row. csv line numbers are 1 indexed, #BecauseCSV.
            # Bug fix: `==` rather than `is` — int identity comparison is a
            # CPython implementation detail (SyntaxWarning on Python 3.8+).
            if reader.line_num == 1:
                continue
            blacklisted_samples.append(line[0].strip())
    return blacklisted_samples
def get_nomad_jobs_breakdown():
    """Summarize pending/running Nomad child jobs: totals plus breakdowns
    by job type and by volume index.

    Only parameterized (dispatchable) jobs are considered; counts come
    from each parameterized job's children stats via _aggregate_nomad_jobs.
    """
    jobs = get_nomad_jobs()
    parameterized_jobs = [job for job in jobs if job['ParameterizedJob']]

    def get_job_type(job):
        # First element of the (type, volume) pair from get_job_details.
        return get_job_details(job)[0]

    def get_job_volume(job):
        # Second element of the (type, volume) pair from get_job_details.
        return get_job_details(job)[1]

    # groupby must be executed on a sorted iterable https://docs.python.org/2/library/itertools.html#itertools.groupby
    sorted_jobs_by_type = sorted(filter(get_job_type, parameterized_jobs), key=get_job_type)
    aggregated_jobs_by_type = groupby(sorted_jobs_by_type, get_job_type)
    nomad_pending_jobs_by_type, nomad_running_jobs_by_type = \
        _aggregate_nomad_jobs(aggregated_jobs_by_type)
    # To get the total jobs for running and pending, the easiest
    # AND the most efficient way is to sum up the stats we've
    # already partially summed up.
    nomad_running_jobs = sum(num_jobs for job_type, num_jobs in nomad_running_jobs_by_type.items())
    nomad_pending_jobs = sum(num_jobs for job_type, num_jobs in nomad_pending_jobs_by_type.items())
    sorted_jobs_by_volume = sorted(filter(get_job_volume, parameterized_jobs), key=get_job_volume)
    aggregated_jobs_by_volume = groupby(sorted_jobs_by_volume, get_job_volume)
    nomad_pending_jobs_by_volume, nomad_running_jobs_by_volume = \
        _aggregate_nomad_jobs(aggregated_jobs_by_volume)
    return {
        "nomad_pending_jobs": nomad_pending_jobs,
        "nomad_running_jobs": nomad_running_jobs,
        "nomad_pending_jobs_by_type": nomad_pending_jobs_by_type,
        "nomad_running_jobs_by_type": nomad_running_jobs_by_type,
        "nomad_pending_jobs_by_volume": nomad_pending_jobs_by_volume,
        "nomad_running_jobs_by_volume": nomad_running_jobs_by_volume
    }
def get_job_details(job):
    """Given a Nomad job dict (as returned by the API), extract (type, volume id).

    Surveyor jobs carry no volume/RAM information, so they are reported
    as ("SURVEYOR", False). Jobs whose ID does not look like
    ``<TYPE>_<VOLUME>_<NUMBER>`` yield (False, False).
    """
    job_id = job["ID"]
    if job_id.startswith("SURVEYOR"):
        return "SURVEYOR", False
    # IDs look like e.g. SALMON_1_2323: <type>_<volume id>_<job number>.
    match = re.match(r"(?P<type>\w+)_(?P<volume_id>\d+)_\d+$", job_id)
    if match is None:
        return False, False
    return match.group('type'), match.group('volume_id')
def _aggregate_nomad_jobs(aggregated_jobs):
"""Aggregates the job counts.
This is accomplished by using the stats that each
parameterized job has about its children jobs.
`jobs` should be a response from the Nomad API's jobs endpoint.
"""
nomad_running_jobs = {}
nomad_pending_jobs = {}
for (aggregate_key, group) in aggregated_jobs:
pending_jobs_count = 0
running_jobs_count = 0
for job in group:
if job["JobSummary"]["Children"]: # this can be null
pending_jobs_count += job["JobSummary"]["Children"]["Pending"]
running_jobs_count += job["JobSummary"]["Children"]["Running"]
nomad_pending_jobs[aggregate_key] = pending_jobs_count
nomad_running_jobs[aggregate_key] = running_jobs_count
return nomad_pending_jobs, nomad_running_jobs
def queryset_iterator(queryset, page_size):
    """Yield every item of `queryset` using the performant paginator.

    Pages of `page_size` rows are fetched one at a time so the whole
    queryset is never materialized at once.
    """
    paginator = PerformantPaginator(queryset, page_size)
    page = paginator.page()
    while True:
        yield from page.object_list
        if not page.has_next():
            return
        page = paginator.page(page.next_page_number())
<|code_end|>
workers/data_refinery_workers/processors/management/commands/create_compendia.py
<|code_start|>import sys
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.performant_pagination.pagination import PerformantPaginator
from data_refinery_common.models import (
Experiment,
Sample,
Organism,
ProcessorJob,
Dataset,
ProcessorJobDatasetAssociation,
ExperimentOrganismAssociation,
ExperimentSampleAssociation
)
from data_refinery_workers.processors import create_compendia
logger = get_and_configure_logger(__name__)
PAGE_SIZE = 2000
def create_job_for_organism(organism: Organism, quant_sf_only=False, svd_algorithm='ARPACK'):
    """Returns a compendia job for the provided organism.
    Fetch all of the experiments and compile large but normally formatted Dataset.

    Note: the original signature read ``organism=Organism``, which made
    the Django model *class* the default argument value; it was almost
    certainly intended as the type annotation it is now. All visible
    callers pass the organism explicitly.
    """
    data = {}
    experiments = Experiment.objects.filter(organisms=organism).prefetch_related('samples')
    paginator = PerformantPaginator(experiments, PAGE_SIZE)
    page = paginator.page()
    # Walk every page of experiments, mapping experiment accession code
    # -> list of this organism's sample accession codes.
    while True:
        for experiment in page.object_list:
            data[experiment.accession_code] = list(
                experiment.samples.filter(organism=organism).values_list('accession_code', flat=True))
        if not page.has_next():
            break
        else:
            page = paginator.page(page.next_page_number())
    job = ProcessorJob()
    job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
    job.save()
    dset = Dataset()
    dset.data = data
    dset.scale_by = 'NONE'
    # Compendia are aggregated by species.
    dset.aggregate_by = 'SPECIES'
    dset.quantile_normalize = False
    dset.quant_sf_only = quant_sf_only
    dset.svd_algorithm = svd_algorithm
    dset.save()
    pjda = ProcessorJobDatasetAssociation()
    pjda.processor_job = job
    pjda.dataset = dset
    pjda.save()
    return job
class Command(BaseCommand):
    """Management command that creates compendia processor jobs.

    NOTE(review): per the linked issue, compendia ProcessorJobs end up
    with an empty `nomad_job_id`. In the single-organism branch of
    handle() below the job is executed in-process via
    create_compendia.create_compendia() and send_job() is never called —
    presumably send_job is where `nomad_job_id` gets recorded for other
    job types; confirm, and if so route single-organism jobs through
    send_job as well.
    """

    def add_arguments(self, parser):
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))
        parser.add_argument(
            "--quant-sf-only",
            # argparse passes the raw string; only the literal "True"
            # enables quantpendium mode.
            type=lambda x: x == "True",
            help=("Whether to create a quantpendium or normal compendium."))
        parser.add_argument(
            "--svd-algorithm",
            type=str,
            help=("Specify SVD algorithm applied during imputation ARPACK, RANDOMIZED or NONE to skip."))

    def handle(self, *args, **options):
        """Create a compendium for one or more organisms.
        If --organism is supplied will immediately create a compendium
        for it. If not a new job will be dispatched for each organism
        with enough microarray samples except for human and mouse.
        """
        if options["organisms"] is None:
            all_organisms = Organism.objects.exclude(name__in=["HOMO_SAPIENS", "MUS_MUSCULUS"])
        else:
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = Organism.objects.filter(name__in=organisms)
        # I think we could just use options["quant_sf_only"] but I
        # wanna make sure that values that are not True do not trigger
        # a truthy evaluation.
        quant_sf_only = False
        if options["quant_sf_only"] is True:
            quant_sf_only = True
        # default algorithm to arpack until we decide that ranomized is preferred
        svd_algorithm = 'NONE' if quant_sf_only else 'ARPACK'
        if options["svd_algorithm"] in ['ARPACK', 'RANDOMIZED', 'NONE']:
            svd_algorithm = options["svd_algorithm"]
        logger.error(all_organisms)
        if all_organisms.count() > 1:
            for organism in all_organisms:
                logger.error(organism)
                job = create_job_for_organism(organism, quant_sf_only, svd_algorithm)
                logger.info("Sending CREATE_COMPENDIA for Organism", job_id=str(job.pk), organism=str(organism))
                send_job(ProcessorPipeline.CREATE_COMPENDIA, job)
        else:
            # Single organism: run the compendia processor synchronously
            # in this process instead of dispatching it to Nomad.
            job = create_job_for_organism(all_organisms[0], quant_sf_only, svd_algorithm)
            create_compendia.create_compendia(job.id)
        sys.exit(0)
<|code_end|>
| common/data_refinery_common/utils.py
<|code_start|>import csv
import hashlib
import io
import os
import re
from functools import partial
from itertools import groupby
from typing import Dict, Set
from urllib.parse import urlparse
import nomad
import requests
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from retrying import retry
from data_refinery_common.performant_pagination.pagination import PerformantPaginator
# Found: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
METADATA_URL = "http://169.254.169.254/latest/meta-data"
INSTANCE_ID = None
SUPPORTED_MICROARRAY_PLATFORMS = None
SUPPORTED_RNASEQ_PLATFORMS = None
READABLE_PLATFORM_NAMES = None
def get_env_variable(var_name: str, default: str=None) -> str:
    """Return the value of environment variable `var_name`.

    Falls back to `default` when the variable is unset; raises
    ImproperlyConfigured when it is unset and no default was provided.
    """
    try:
        return os.environ[var_name]
    except KeyError:
        # Compare against None explicitly so that a falsy-but-valid
        # default such as "" is honored instead of raising.
        if default is not None:
            return default
        error_msg = "Set the %s environment variable" % var_name
        raise ImproperlyConfigured(error_msg)
def get_env_variable_gracefully(var_name: str, default: str=None) -> str:
"""
Get an environment variable, or return a default value, but always fail gracefully and return
something rather than raising an ImproperlyConfigured error.
"""
try:
return os.environ[var_name]
except KeyError:
return default
def get_instance_id() -> str:
"""Returns the AWS instance id where this is running or "local"."""
global INSTANCE_ID
if INSTANCE_ID is None:
if settings.RUNNING_IN_CLOUD:
@retry(stop_max_attempt_number=3)
def retrieve_instance_id():
return requests.get(os.path.join(METADATA_URL, "instance-id")).text
INSTANCE_ID = retrieve_instance_id()
else:
INSTANCE_ID = "local"
return INSTANCE_ID
def get_worker_id() -> str:
    """Returns <instance_id>/<process name> identifying this worker."""
    # Bug fix: `current_process` was referenced without ever being
    # imported in this module, so calling this function raised
    # NameError. Import it locally to keep the fix self-contained.
    from multiprocessing import current_process
    return get_instance_id() + "/" + current_process().name
def get_volume_index(path='/home/user/data_store/VOLUME_INDEX') -> str:
""" Reads the contents of the VOLUME_INDEX file, else returns default """
if settings.RUNNING_IN_CLOUD:
default = "-1"
else:
default = "0"
try:
with open(path, 'r') as f:
v_id = f.read().strip()
return v_id
except Exception as e:
# Our configured logger needs util, so we use the standard logging library for just this.
import logging
logger = logging.getLogger(__name__)
logger.info(str(e))
logger.info("Could not read volume index file, using default: " + str(default))
return default
def get_nomad_jobs() -> list:
"""Calls nomad service and return all jobs"""
try:
nomad_host = get_env_variable("NOMAD_HOST")
nomad_port = get_env_variable("NOMAD_PORT", "4646")
nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
return nomad_client.jobs.get_jobs()
except nomad.api.exceptions.BaseNomadException:
# Nomad is not available right now
return []
def get_active_volumes() -> Set[str]:
"""Returns a Set of indices for volumes that are currently mounted.
These can be used to determine which jobs would actually be able
to be placed if they were queued up.
"""
nomad_host = get_env_variable("NOMAD_HOST")
nomad_port = get_env_variable("NOMAD_PORT", "4646")
nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
volumes = set()
try:
for node in nomad_client.nodes.get_nodes():
node_detail = nomad_client.node.get_node(node["ID"])
if 'Status' in node_detail and node_detail['Status'] == 'ready' \
and 'Meta' in node_detail and 'volume_index' in node_detail['Meta']:
volumes.add(node_detail['Meta']['volume_index'])
except nomad.api.exceptions.BaseNomadException:
# Nomad is down, return the empty set.
pass
return volumes
def get_active_volumes_detailed() -> Dict:
"""Returns the instance type and number of allocations (jobs) for each active volume
These can be used to determine which jobs would actually be able
to be placed if they were queued up.
"""
nomad_host = get_env_variable("NOMAD_HOST")
nomad_port = get_env_variable("NOMAD_PORT", "4646")
nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
volumes = dict()
try:
for node in nomad_client.nodes.get_nodes():
node_detail = nomad_client.node.get_node(node["ID"])
allocations = len(nomad_client.node.get_allocations(node["ID"]))
if 'Status' in node_detail and node_detail['Status'] == 'ready' \
and 'Meta' in node_detail and 'volume_index' in node_detail['Meta']:
volume_info = dict()
volume_info["type"] = node_detail['Attributes'].get("platform.aws.instance-type",
None)
volume_info["allocations"] = allocations
volumes[node_detail['Meta']['volume_index']] = volume_info
except nomad.api.exceptions.BaseNomadException:
# Nomad is down, return the empty dict.
pass
return volumes
def get_supported_microarray_platforms(platforms_csv: str="config/supported_microarray_platforms.csv"
                                       ) -> list:
    """
    Loads our supported microarray platforms file and returns a list of dictionaries
    containing the internal accession, the external accession, and a boolean indicating
    whether or not the platform supports brainarray.
    CSV must be in the format:
    Internal Accession | External Accession | Supports Brainarray

    The result is memoized in the module-level
    SUPPORTED_MICROARRAY_PLATFORMS and read from disk only once.
    """
    global SUPPORTED_MICROARRAY_PLATFORMS
    if SUPPORTED_MICROARRAY_PLATFORMS is not None:
        return SUPPORTED_MICROARRAY_PLATFORMS
    SUPPORTED_MICROARRAY_PLATFORMS = []
    with open(platforms_csv) as platforms_file:
        reader = csv.reader(platforms_file)
        for line in reader:
            # Skip the header row. csv line numbers are 1 indexed, #BecauseCSV.
            # Bug fix: use `==`, not `is` — identity comparison of ints is a
            # CPython implementation detail (SyntaxWarning on Python 3.8+).
            if reader.line_num == 1:
                continue
            external_accession = line[1]
            is_brainarray = line[2] == 'y'
            SUPPORTED_MICROARRAY_PLATFORMS.append({"platform_accession": line[0],
                                                   "external_accession": external_accession,
                                                   "is_brainarray": is_brainarray})
            # A-GEOD-13158 is the same platform as GPL13158 and this
            # pattern is generalizable. Since we don't want to have to
            # list a lot of platforms twice just with different prefixes,
            # we just convert them and add them to the list.
            if external_accession.startswith("A-GEOD"):
                converted_accession = external_accession.replace("A-GEOD-", "GPL")
                SUPPORTED_MICROARRAY_PLATFORMS.append({"platform_accession": line[0],
                                                       "external_accession": converted_accession,
                                                       "is_brainarray": is_brainarray})
            # Our list of supported platforms contains both A-GEOD-*
            # and GPL*, so convert both ways.
            if external_accession.startswith("GPL"):
                converted_accession = external_accession.replace("GPL", "A-GEOD-")
                SUPPORTED_MICROARRAY_PLATFORMS.append({"platform_accession": line[0],
                                                       "external_accession": converted_accession,
                                                       "is_brainarray": is_brainarray})
    return SUPPORTED_MICROARRAY_PLATFORMS
def get_supported_rnaseq_platforms(platforms_list: str="config/supported_rnaseq_platforms.txt"
) -> list:
"""
Returns a list of RNASeq platforms which are currently supported.
"""
global SUPPORTED_RNASEQ_PLATFORMS
if SUPPORTED_RNASEQ_PLATFORMS is not None:
return SUPPORTED_RNASEQ_PLATFORMS
SUPPORTED_RNASEQ_PLATFORMS = []
with open(platforms_list) as platforms_file:
for line in platforms_file:
SUPPORTED_RNASEQ_PLATFORMS.append(line.strip())
return SUPPORTED_RNASEQ_PLATFORMS
def get_readable_affymetrix_names(mapping_csv: str="config/readable_affymetrix_names.csv") -> Dict:
    """
    Loads the mapping from human readable names to internal accessions for Affymetrix platforms.
    CSV must be in the format:
    Readable Name | Internal Accession
    Returns a dictionary mapping from internal accessions to human readable names.

    The result is memoized in the module-level READABLE_PLATFORM_NAMES
    and read from disk only once.
    """
    global READABLE_PLATFORM_NAMES
    if READABLE_PLATFORM_NAMES is not None:
        return READABLE_PLATFORM_NAMES
    READABLE_PLATFORM_NAMES = {}
    with open(mapping_csv, encoding='utf-8') as mapping_file:
        reader = csv.reader(mapping_file)
        for line in reader:
            # Skip the header row. csv line numbers are 1 indexed, #BecauseCSV.
            # Bug fix: `==` rather than `is` — int identity comparison is a
            # CPython implementation detail (SyntaxWarning on Python 3.8+).
            if reader.line_num == 1:
                continue
            READABLE_PLATFORM_NAMES[line[1]] = line[0]
    return READABLE_PLATFORM_NAMES
def get_internal_microarray_accession(accession_code):
platforms = get_supported_microarray_platforms()
for platform in platforms:
if platform['external_accession'] == accession_code:
return platform['platform_accession']
elif platform['platform_accession'] == accession_code:
return platform['platform_accession']
return None
def get_normalized_platform(external_accession):
"""
Handles a weirdo cases, where external_accessions in the format
hugene10stv1 -> hugene10st
"""
matches = re.findall(r"stv\d$", external_accession)
for match in matches:
external_accession = external_accession.replace(match, 'st')
return external_accession
def parse_s3_url(url):
"""
Parses S3 URL.
Returns bucket (domain) and file (full path).
"""
bucket = ''
path = ''
if url:
result = urlparse(url)
bucket = result.netloc
path = result.path.strip('/')
return bucket, path
def get_s3_url(s3_bucket: str, s3_key: str) -> str:
"""
Calculates the s3 URL for a file from the bucket name and the file key.
"""
return "%s.s3.amazonaws.com/%s" % (s3_bucket, s3_key)
def calculate_file_size(absolute_file_path):
return os.path.getsize(absolute_file_path)
def calculate_sha1(absolute_file_path):
hash_object = hashlib.sha1()
with open(absolute_file_path, mode='rb') as open_file:
for buf in iter(partial(open_file.read, io.DEFAULT_BUFFER_SIZE), b''):
hash_object.update(buf)
return hash_object.hexdigest()
def get_sra_download_url(run_accession, protocol="fasp"):
"""Try getting the sra-download URL from CGI endpoint"""
#Ex: curl --data "acc=SRR6718414&accept-proto=fasp&version=2.0" https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi
cgi_url = "https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi"
data = "acc=" + run_accession + "&accept-proto=" + protocol + "&version=2.0"
try:
resp = requests.post(cgi_url, data=data)
except Exception as e:
# Our configured logger needs util, so we use the standard logging library for just this.
import logging
logger = logging.getLogger(__name__)
logger.exception("Bad CGI request!: " + str(cgi_url) + ", " + str(data))
return None
if resp.status_code != 200:
# This isn't on the new servers
return None
else:
try:
# From: '#2.0\nsrapub|DRR002116|2324796808|2013-07-03T05:51:55Z|50964cfc69091cdbf92ea58aaaf0ac1c||fasp://dbtest@sra-download.ncbi.nlm.nih.gov:data/sracloud/traces/dra0/DRR/000002/DRR002116|200|ok\n'
# To: 'dbtest@sra-download.ncbi.nlm.nih.gov:data/sracloud/traces/dra0/DRR/000002/DRR002116'
# Sometimes, the responses from names.cgi makes no sense at all on a per-accession-code basis. This helps us handle that.
# $ curl --data "acc=SRR5818019&accept-proto=fasp&version=2.0" https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi
# 2.0\nremote|SRR5818019|434259775|2017-07-11T21:32:08Z|a4bfc16dbab1d4f729c4552e3c9519d1|||400|Only 'https' protocol is allowed for this object
protocol_header = protocol + '://'
sra_url = resp.text.split('\n')[1].split('|')[6]
return sra_url
except Exception as e:
# Our configured logger needs util, so we use the standard logging library for just this.
import logging
logger = logging.getLogger(__name__)
logger.exception("Error parsing CGI response: " + str(cgi_url) + " " + str(data) + " " + str(resp.text))
return None
def get_fasp_sra_download(run_accession: str):
"""Get an URL for SRA using the FASP protocol.
These URLs should not actually include the protcol."""
full_url = get_sra_download_url(run_accession, 'fasp')
if full_url:
sra_url = full_url.split('fasp://')[1]
return sra_url
else:
return None
def get_https_sra_download(run_accession: str):
"""Get an HTTPS URL for SRA."""
return get_sra_download_url(run_accession, 'https')
def load_blacklist(blacklist_csv: str = "config/RNASeqRunBlackList.csv"):
    """ Loads the SRA run blacklist.

    Returns a list of blacklisted accession codes, taken from the first
    column of each non-header row of the CSV, stripped of whitespace.
    """
    blacklisted_samples = []
    with open(blacklist_csv, encoding='utf-8') as blacklist_file:
        reader = csv.reader(blacklist_file)
        for line in reader:
            # Skip the header row. csv line numbers are 1 indexed, #BecauseCSV.
            # Bug fix: `==` rather than `is` — int identity comparison is a
            # CPython implementation detail (SyntaxWarning on Python 3.8+).
            if reader.line_num == 1:
                continue
            blacklisted_samples.append(line[0].strip())
    return blacklisted_samples
def get_nomad_jobs_breakdown():
    """Return aggregate pending/running counts for Nomad jobs.

    Considers only parameterized (parent) jobs, and sums the stats they
    report about their children. Returns a dict with overall totals plus
    breakdowns keyed by job type and by volume index.
    """
    jobs = get_nomad_jobs()
    parameterized_jobs = [job for job in jobs if job['ParameterizedJob']]

    # Key helpers for sorting/grouping; falsy keys mark unparseable job IDs
    # and are filtered out below.
    def get_job_type(job):
        return get_job_details(job)[0]

    def get_job_volume(job):
        return get_job_details(job)[1]

    # groupby must be executed on a sorted iterable
    # https://docs.python.org/2/library/itertools.html#itertools.groupby
    sorted_jobs_by_type = sorted(filter(get_job_type, parameterized_jobs), key=get_job_type)
    aggregated_jobs_by_type = groupby(sorted_jobs_by_type, get_job_type)
    nomad_pending_jobs_by_type, nomad_running_jobs_by_type = \
        _aggregate_nomad_jobs(aggregated_jobs_by_type)

    # To get the total jobs for running and pending, the easiest
    # AND the most efficient way is to sum up the stats we've
    # already partially summed up. (Iterate .values() directly; the
    # keys were unused.)
    nomad_running_jobs = sum(nomad_running_jobs_by_type.values())
    nomad_pending_jobs = sum(nomad_pending_jobs_by_type.values())

    sorted_jobs_by_volume = sorted(filter(get_job_volume, parameterized_jobs), key=get_job_volume)
    aggregated_jobs_by_volume = groupby(sorted_jobs_by_volume, get_job_volume)
    nomad_pending_jobs_by_volume, nomad_running_jobs_by_volume = \
        _aggregate_nomad_jobs(aggregated_jobs_by_volume)

    return {
        "nomad_pending_jobs": nomad_pending_jobs,
        "nomad_running_jobs": nomad_running_jobs,
        "nomad_pending_jobs_by_type": nomad_pending_jobs_by_type,
        "nomad_running_jobs_by_type": nomad_running_jobs_by_type,
        "nomad_pending_jobs_by_volume": nomad_pending_jobs_by_volume,
        "nomad_running_jobs_by_volume": nomad_running_jobs_by_volume
    }
def get_job_details(job):
    """Given a Nomad Job, as returned by the API, return (type, volume id).

    Surveyor jobs yield ("SURVEYOR", False) because they carry no volume.
    Jobs whose ID doesn't match the expected "<TYPE>_<volume>_<n>" pattern
    yield (False, False).
    """
    job_id = job["ID"]
    # Surveyor jobs don't have ids and RAM, so handle them specially.
    if job_id.startswith("SURVEYOR"):
        return "SURVEYOR", False
    # IDs look like SALMON_1_2323: <type>_<volume id>_<index>.
    parsed = re.match(r"(?P<type>\w+)_(?P<volume_id>\d+)_\d+$", job_id)
    if parsed is None:
        return False, False
    return parsed.group('type'), parsed.group('volume_id')
def _aggregate_nomad_jobs(aggregated_jobs):
"""Aggregates the job counts.
This is accomplished by using the stats that each
parameterized job has about its children jobs.
`jobs` should be a response from the Nomad API's jobs endpoint.
"""
nomad_running_jobs = {}
nomad_pending_jobs = {}
for (aggregate_key, group) in aggregated_jobs:
pending_jobs_count = 0
running_jobs_count = 0
for job in group:
if job["JobSummary"]["Children"]: # this can be null
pending_jobs_count += job["JobSummary"]["Children"]["Pending"]
running_jobs_count += job["JobSummary"]["Children"]["Running"]
nomad_pending_jobs[aggregate_key] = pending_jobs_count
nomad_running_jobs[aggregate_key] = running_jobs_count
return nomad_pending_jobs, nomad_running_jobs
def queryset_iterator(queryset, page_size=2000):
    """Use the performant paginator to iterate over a queryset.

    Yields items one page (of `page_size` rows) at a time so very large
    querysets don't have to be materialized in memory all at once.
    """
    paginator = PerformantPaginator(queryset, page_size)
    page = paginator.page()
    while True:
        yield from page.object_list
        if not page.has_next():
            return
        page = paginator.page(page.next_page_number())
<|code_end|>
workers/data_refinery_workers/processors/management/commands/create_compendia.py
<|code_start|>import sys
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
from data_refinery_common.utils import queryset_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism, quant_sf_only=False, svd_algorithm='ARPACK'):
    """Returns a compendia job for the provided organism.

    Fetch all of the organism's experiments and compile a large but
    normally formatted Dataset mapping each experiment accession code
    to the accession codes of that organism's samples.

    NOTE: the parameter was previously declared as `organism=Organism`,
    which made the model *class* the default value instead of annotating
    the type; every visible caller passes an organism explicitly.
    """
    data = {}
    experiments = Experiment.objects.filter(organisms=organism).prefetch_related('samples')
    for experiment in queryset_iterator(experiments):
        data[experiment.accession_code] = list(
            experiment.samples.filter(organism=organism).values_list('accession_code', flat=True))

    job = ProcessorJob()
    job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
    job.save()

    dset = Dataset()
    dset.data = data
    dset.scale_by = 'NONE'
    dset.aggregate_by = 'SPECIES'
    dset.quantile_normalize = False
    dset.quant_sf_only = quant_sf_only
    dset.svd_algorithm = svd_algorithm
    dset.save()

    # Link the dataset to the job so the processor can find it.
    pjda = ProcessorJobDatasetAssociation()
    pjda.processor_job = job
    pjda.dataset = dset
    pjda.save()

    return job
class Command(BaseCommand):
    # Django management command: dispatches CREATE_COMPENDIA jobs.
    def add_arguments(self, parser):
        # All arguments are optional; handle() supplies defaults below.
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))
        parser.add_argument(
            "--quant-sf-only",
            type=lambda x: x == "True",
            help=("Whether to create a quantpendium or normal compendium."))
        parser.add_argument(
            "--svd-algorithm",
            type=str,
            help=("Specify SVD algorithm applied during imputation ARPACK, RANDOMIZED or NONE to skip."))

    def handle(self, *args, **options):
        """Create a compendium for one or more organisms.

        If --organism is supplied will immediately create a compendium
        for it. If not a new job will be dispatched for each organism
        with enough microarray samples except for human and mouse.
        """
        if options["organisms"] is None:
            all_organisms = Organism.objects.exclude(name__in=["HOMO_SAPIENS", "MUS_MUSCULUS"])
        else:
            # Normalize e.g. "homo sapiens" to the stored "HOMO_SAPIENS" form.
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = Organism.objects.filter(name__in=organisms)
        # I think we could just use options["quant_sf_only"] but I
        # wanna make sure that values that are not True do not trigger
        # a truthy evaluation.
        quant_sf_only = False
        if options["quant_sf_only"] is True:
            quant_sf_only = True
        # default algorithm to arpack until we decide that randomized is preferred
        svd_algorithm = 'NONE' if quant_sf_only else 'ARPACK'
        if options["svd_algorithm"] in ['ARPACK', 'RANDOMIZED', 'NONE']:
            svd_algorithm = options["svd_algorithm"]
        logger.debug(all_organisms)
        for organism in all_organisms:
            logger.debug(organism)
            job = create_job_for_organism(organism, quant_sf_only, svd_algorithm)
            logger.info("Sending CREATE_COMPENDIA for Organism", job_id=str(job.pk), organism=str(organism))
            send_job(ProcessorPipeline.CREATE_COMPENDIA, job)
        # Explicit success exit code for scripted callers.
        sys.exit(0)
<|code_end|>
|
[HOTFIX] Prevent compendia jobs from getting cleaned up and fix QN jobs
## Issue Number
#1728
#1726
#1727
| foreman/data_refinery_foreman/foreman/management/commands/create_compendia.py
<|code_start|>import sys
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
from data_refinery_common.utils import queryset_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism, quant_sf_only=False, svd_algorithm='ARPACK'):
    """Returns a compendia job for the provided organism.

    Fetch all of the organism's experiments and compile a large but
    normally formatted Dataset mapping each experiment accession code
    to the accession codes of that organism's samples.

    NOTE: the parameter was previously declared as `organism=Organism`,
    which made the model *class* the default value instead of annotating
    the type; every visible caller passes an organism explicitly.
    """
    data = {}
    experiments = Experiment.objects.filter(organisms=organism).prefetch_related('samples')
    for experiment in queryset_iterator(experiments):
        data[experiment.accession_code] = list(
            experiment.samples.filter(organism=organism).values_list('accession_code', flat=True))

    job = ProcessorJob()
    job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
    job.save()

    dset = Dataset()
    dset.data = data
    dset.scale_by = 'NONE'
    dset.aggregate_by = 'SPECIES'
    dset.quantile_normalize = False
    dset.quant_sf_only = quant_sf_only
    dset.svd_algorithm = svd_algorithm
    dset.save()

    # Link the dataset to the job so the processor can find it.
    pjda = ProcessorJobDatasetAssociation()
    pjda.processor_job = job
    pjda.dataset = dset
    pjda.save()

    return job
class Command(BaseCommand):
    # Django management command: dispatches CREATE_COMPENDIA jobs.
    def add_arguments(self, parser):
        # All arguments are optional; handle() supplies defaults below.
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))
        parser.add_argument(
            "--quant-sf-only",
            type=lambda x: x == "True",
            help=("Whether to create a quantpendium or normal compendium."))
        parser.add_argument(
            "--svd-algorithm",
            type=str,
            help=("Specify SVD algorithm applied during imputation ARPACK, RANDOMIZED or NONE to skip."))

    def handle(self, *args, **options):
        """Create a compendium for one or more organisms.

        If --organism is supplied will immediately create a compendium
        for it. If not a new job will be dispatched for each organism
        with enough microarray samples except for human and mouse.
        """
        if options["organisms"] is None:
            all_organisms = Organism.objects.exclude(name__in=["HOMO_SAPIENS", "MUS_MUSCULUS"])
        else:
            # Normalize e.g. "homo sapiens" to the stored "HOMO_SAPIENS" form.
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = Organism.objects.filter(name__in=organisms)
        # I think we could just use options["quant_sf_only"] but I
        # wanna make sure that values that are not True do not trigger
        # a truthy evaluation.
        quant_sf_only = False
        if options["quant_sf_only"] is True:
            quant_sf_only = True
        # default algorithm to arpack until we decide that randomized is preferred
        svd_algorithm = 'NONE' if quant_sf_only else 'ARPACK'
        if options["svd_algorithm"] in ['ARPACK', 'RANDOMIZED', 'NONE']:
            svd_algorithm = options["svd_algorithm"]
        logger.debug(all_organisms)
        for organism in all_organisms:
            logger.debug(organism)
            job = create_job_for_organism(organism, quant_sf_only, svd_algorithm)
            logger.info("Sending CREATE_COMPENDIA for Organism", job_id=str(job.pk), organism=str(organism))
            send_job(ProcessorPipeline.CREATE_COMPENDIA, job)
        # Explicit success exit code for scripted callers.
        sys.exit(0)
| foreman/data_refinery_foreman/foreman/management/commands/create_compendia.py
<|code_start|>import sys
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
from data_refinery_common.utils import queryset_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism, quant_sf_only=False, svd_algorithm='ARPACK'):
    """Returns a compendia job for the provided organism.

    Fetch all of the organism's experiments and compile a large but
    normally formatted Dataset mapping each experiment accession code
    to the accession codes of that organism's samples.

    NOTE: the parameter was previously declared as `organism=Organism`,
    which made the model *class* the default value instead of annotating
    the type; every visible caller passes an organism explicitly.
    """
    data = {}
    experiments = Experiment.objects.filter(organisms=organism).prefetch_related('samples')
    for experiment in queryset_iterator(experiments):
        data[experiment.accession_code] = list(experiment.samples.filter(organism=organism)\
                                               .values_list('accession_code', flat=True))

    job = ProcessorJob()
    job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
    job.save()

    dset = Dataset()
    dset.data = data
    dset.scale_by = 'NONE'
    # Quantpendia are aggregated by EXPERIMENT; regular compendia by SPECIES.
    # (The previous comment claimed the opposite of what this line does.)
    dset.aggregate_by = 'EXPERIMENT' if quant_sf_only else 'SPECIES'
    dset.quantile_normalize = False
    dset.quant_sf_only = quant_sf_only
    dset.svd_algorithm = svd_algorithm
    dset.save()

    # Link the dataset to the job so the processor can find it.
    pjda = ProcessorJobDatasetAssociation()
    pjda.processor_job = job
    pjda.dataset = dset
    pjda.save()

    return job
class Command(BaseCommand):
    # Django management command: dispatches CREATE_COMPENDIA jobs.
    def add_arguments(self, parser):
        # All arguments are optional; handle() supplies defaults below.
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))
        parser.add_argument(
            "--quant-sf-only",
            type=lambda x: x == "True",
            help=("Whether to create a quantpendium or normal compendium. Quantpendium will be aggregated by EXPERIMENT"))
        parser.add_argument(
            "--svd-algorithm",
            type=str,
            help=("Specify SVD algorithm applied during imputation ARPACK, RANDOMIZED or NONE to skip."))

    def handle(self, *args, **options):
        """Create a compendium for one or more organisms.

        If --organism is supplied will immediately create a compendium
        for it. If not a new job will be dispatched for each organism
        with enough microarray samples except for human and mouse.
        """
        if options["organisms"] is None:
            all_organisms = Organism.objects.exclude(name__in=["HOMO_SAPIENS", "MUS_MUSCULUS"])
        else:
            # Normalize e.g. "homo sapiens" to the stored "HOMO_SAPIENS" form.
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = Organism.objects.filter(name__in=organisms)
        # I think we could just use options["quant_sf_only"] but I
        # wanna make sure that values that are not True do not trigger
        # a truthy evaluation.
        quant_sf_only = options["quant_sf_only"] is True
        # default algorithm to arpack until we decide that randomized is preferred
        svd_algorithm = 'NONE' if quant_sf_only else 'ARPACK'
        if options["svd_algorithm"] in ['ARPACK', 'RANDOMIZED', 'NONE']:
            svd_algorithm = options["svd_algorithm"]
        logger.debug('Generating compendia for organisms', organisms=all_organisms)
        for organism in all_organisms:
            job = create_job_for_organism(organism, quant_sf_only, svd_algorithm)
            logger.info("Sending CREATE_COMPENDIA for Organism", job_id=str(job.pk), organism=str(organism))
            send_job(ProcessorPipeline.CREATE_COMPENDIA, job)
        # Explicit success exit code for scripted callers.
        sys.exit(0)
|
API Stats include timestamp of last broken cache
### Context
Our database has a 10 minute cache.
### Problem or idea
It would be useful to be able to look at the stats endpoint response and determine when the cache was last invalidated.
### Solution or next step
Include that timestamp.
| api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.functions import Trunc
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_TERMS,
LOOKUP_FILTER_RANGE,
LOOKUP_FILTER_PREFIX,
LOOKUP_FILTER_WILDCARD,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
LOOKUP_QUERY_EXCLUDE,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend
)
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from elasticsearch_dsl import TermsFacet, DateHistogramFacet
from rest_framework import status, filters, generics, mixins
from rest_framework.exceptions import APIException, NotFound
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
SampleSerializer,
CompendiaSerializer,
CompendiaWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import (
ExperimentDocument
)
from data_refinery_common.utils import (
get_env_variable,
get_active_volumes,
get_nomad_jobs_breakdown,
)
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
logger = get_and_configure_logger(__name__)
##
# Variables
##
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
##
# ElasticSearch
##
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    # Adds a cardinality metric to each facet bucket so the frontend can
    # show a per-filter downloadable-sample count.
    def aggregate(self, request, queryset, view):
        """Extends FacetedSearchFilterBackend to add additional metrics to each bucket
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19

        We have the downloadable sample accession codes indexed for each experiment.
        The cardinality metric, returns the number of unique samples for each bucket.
        However it's just an approximate
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        I used the highest possible precision threshold, but this might increase the amount
        of memory used.
        """
        facets = self.construct_facets(request, view)
        for field, facet in iteritems(facets):
            agg = facet['facet'].get_aggregation()
            # Attach the (approximate) unique-sample count to every bucket.
            queryset.aggs.bucket(field, agg)\
                .metric('total_samples', 'cardinality', field='downloadable_samples', precision_threshold=40000)
        return queryset
##
# ElasticSearch powered Search and Filter
##
@method_decorator(name='list', decorator=swagger_auto_schema(
    manual_parameters=[
        openapi.Parameter(
            name='technology', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
        ),
        openapi.Parameter(
            name='has_publication', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Filter the results that have associated publications with `?has_publication=true`",
        ),
        openapi.Parameter(
            name='platform', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by platform, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='organism', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by organism, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='num_processed_samples', in_=openapi.IN_QUERY,
            type=openapi.TYPE_NUMBER,
            description="Use ElasticSearch queries to specify the number of processed samples of the results",
        ),
    ],
    operation_description="""
Use this endpoint to search among the experiments.

This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)

There's an additional field in the response named `facets` that contain stats on the number of results per filter type.

Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
"""))
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """
    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended
    ]

    # Primitive
    lookup_field = 'id'

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        'title': {'boost': 10},
        'publication_authors': {'boost': 8}, # "People will search themselves"
        'publication_title': {'boost': 5},
        'submitter_institution': {'boost': 3},
        'description': {'boost': 2},
        'accession_code': None,
        'alternate_accession_code': None,
        'publication_doi': None,
        'pubmed_id': None,
        'sample_metadata_fields': None,
        'platform_names': None
    }

    # Define filtering fields
    filter_fields = {
        'id': {
            'field': '_id',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN
            ],
        },
        'technology': 'technology',
        'has_publication': 'has_publication',
        'platform': 'platform_accession_codes',
        'organism': 'organism_names',
        'num_processed_samples': {
            'field': 'num_processed_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        },
        'num_downloadable_samples': {
            'field': 'num_downloadable_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        }
    }

    # Define ordering fields
    ordering_fields = {
        'id': 'id',
        'title': 'title.raw',
        'description': 'description.raw',
        'num_total_samples': 'num_total_samples',
        'num_downloadable_samples': 'num_downloadable_samples',
        'source_first_published': 'source_first_published'
    }

    # Specify default ordering
    ordering = ('_score', '-num_total_samples', 'id', 'title', 'description', '-source_first_published')

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        'technology': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': True # These are enabled by default, which is more expensive but more simple.
        },
        'organism_names': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': True,
            'options': {
                'size': 999999
            }
        },
        'platform_accession_codes': {
            'field': 'platform_accession_codes',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
            'options': {
                'size': 999999
            }
        },
        'has_publication': {
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        'technology_global': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': False,
            'global': True
        },
        'organism_names_global': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'platform_names_global': {
            'field': 'platform_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        # NOTE(review): this entry is named has_publication_global but points
        # at the 'platform_names' field — looks like a copy-paste slip from
        # the entry above; confirm whether 'has_publication' was intended.
        'has_publication_global': {
            'field': 'platform_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
        },
    }
    faceted_search_param = 'facet'

    def list(self, request, *args, **kwargs):
        # NOTE(review): args/kwargs are passed positionally here (not
        # unpacked with */**); they end up inside the parent's *args and
        # appear unused by it, but `*args, **kwargs` was probably intended.
        response = super(ExperimentDocumentView, self).list(request, args, kwargs)
        # Flatten the raw ES facet structure into the documented shape.
        response.data['facets'] = self.transform_es_facets(response.data['facets'])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:

        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }

        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet['buckets']:
                # Boolean facets expose their key as 'key_as_string'
                # ("true"/"false"); everything else uses 'key' directly.
                if field == 'has_publication':
                    filter_group[bucket['key_as_string']] = bucket['total_samples']['value']
                else:
                    filter_group[bucket['key']] = bucket['total_samples']['value']
            result[field] = filter_group
        return result
##
# Dataset
##
class CreateDatasetView(generics.CreateAPIView):
    """Creates and returns new Datasets.

    POST-only; created Datasets are later modified and started via
    DatasetView.
    """
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
@method_decorator(name='get', decorator=swagger_auto_schema(operation_description="View a single Dataset.",manual_parameters=[
    openapi.Parameter(
        name='details', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
    )]))
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None)) # partial updates not supported
@method_decorator(name='put', decorator=swagger_auto_schema(operation_description="""
Modify an existing Dataset.

Set `start` to `true` along with a valid activated API token (from `/token/`) to begin smashing and delivery.

You must also supply `email_address` with `start`, though this will never be serialized back to you.
"""))
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = 'id'

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        """Return True unless the email belongs to a known team member.

        Used to keep internal downloads out of the Slack engagement feed.
        """
        return email is not None \
            and email.find("cansav09") != 0 \
            and email.find("arielsvn") != 0 \
            and email.find("jaclyn.n.taroni") != 0 \
            and email.find("kurt.wheeler") != 0 \
            and email.find("greenescientist") != 0 \
            and email.find("@alexslemonade.org") == -1 \
            and email.find("miserlou") != 0 \
            and email.find("michael.zietz@gmail.com") != 0 \
            and email.find("d.prasad") != 0 \
            and email.find("daniel.himmelstein@gmail.com") != 0 \
            and email.find("dv.prasad991@gmail.com") != 0

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the activated APIToken (looked up from the API_KEY header)
        when one exists; otherwise returns the default context unchanged.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        # Snapshot the pre-update state so we can restore protected fields
        # for datasets that are already being processed.
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        qn_organisms = Organism.get_objects_with_qn_targets()
        # We convert 'ALL' into the actual accession codes given
        for key in new_data['data'].keys():
            accessions = new_data['data'][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(experiment.samples.filter(is_processed=True, organism__in=qn_organisms).values_list('accession_code', flat=True))
                new_data['data'][key] = sample_codes

        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")

        if new_data.get('start'):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get('token_id', None)
            if not token_id:
                token_id = self.request.META.get('HTTP_API_KEY', None)
            try:
                token = APIToken.objects.get(id=token_id, is_activated=True)
            except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")

            supplied_email_address = self.request.data.get('email_address', None)
            email_ccdl_ok = self.request.data.get('email_ccdl_ok', False)

            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                # Persist validated updates before dispatching the job.
                obj = serializer.save()
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get('no_send_job', False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException("Unable to queue download job. Something has gone"
                                       " wrong and we have been notified about it.")

                serializer.validated_data['is_processing'] = True
                obj = serializer.save()

                # Best-effort Slack notification; all failures are swallowed.
                if settings.RUNNING_IN_CLOUD and settings.ENGAGEMENTBOT_WEBHOOK is not None \
                        and DatasetView._should_display_on_engagement_bot(supplied_email_address):
                    try:
                        try:
                            remote_ip = get_client_ip(self.request)
                            city = requests.get('https://ipapi.co/' + remote_ip + '/json/', timeout=10).json()['city']
                        except Exception:
                            city = "COULD_NOT_DETERMINE"

                        # NOTE(review): if get_client_ip itself raised above,
                        # remote_ip is unbound here and the line below raises
                        # NameError, which the outer except silently swallows.
                        new_user_text = "New user " + supplied_email_address + " from " + city + " [" + remote_ip + "] downloaded a dataset! (" + str(old_object.id) + ")"
                        webhook_url = settings.ENGAGEMENTBOT_WEBHOOK
                        slack_json = {
                            "channel": "ccdl-general", # Move to robots when we get sick of these
                            "username": "EngagementBot",
                            "icon_emoji": ":halal:",
                            "attachments":[
                                { "color": "good",
                                  "text": new_user_text
                                }
                            ]
                        }
                        response = requests.post(
                            webhook_url,
                            json=slack_json,
                            headers={'Content-Type': 'application/json'},
                            timeout=10
                        )
                    except Exception as e:
                        # It doesn't really matter if this didn't work
                        logger.error(e)
                        pass

                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data['data'] = old_data
            serializer.validated_data['aggregate_by'] = old_aggregate
        serializer.save()
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create

    There are several endpoints like [/dataset](#tag/dataset) and [/results](#tag/results) that return
    S3 urls where users can download the files we produce, however in order to get those files people
    need to accept our terms of use by creating a token and activating it.

    ```
    POST /token
    PUT /token/{token-id} is_active=True
    ```

    The token id needs to be sent on the `API_KEY` header on http requests.

    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """
    model = APIToken
    serializer_class = APITokenSerializer
# PATCH is hidden from the API docs; only GET and PUT are documented.
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify Api Tokens.

    get:
    Return details about a specific token.

    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """
    model = APIToken
    lookup_field = 'id'
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##
class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """
    model = Experiment
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    # Exact-match query-parameter filters handled by DjangoFilterBackend.
    filterset_fields = (
        'title',
        'description',
        'accession_code',
        'alternate_accession_code',
        'source_database',
        'source_url',
        'has_publication',
        'publication_title',
        'publication_doi',
        'pubmed_id',
        'organisms',
        'submitter_institution',
        'created_at',
        'last_modified',
        'source_first_published',
        'source_last_modified',
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given its accession code """
    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='dataset_id', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns samples that are added to a dataset.",
    ),
    openapi.Parameter(
        name='experiment_accession_code', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns only the samples associated with an experiment accession code.",
    ),
    openapi.Parameter(
        name='accession_codes', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
    ),
]))
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """
    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = '__all__'
    # NOTE(review): `('-is_processed')` is a plain string, not a 1-tuple.
    # DRF accepts either form, but `('-is_processed',)` would be clearer.
    ordering = ('-is_processed')
    filterset_fields = (
        'title',
        'organism',
        'source_database',
        'source_archive_url',
        'has_raw',
        'platform_name',
        'technology',
        'manufacturer',
        'sex',
        'age',
        'specimen_part',
        'genotype',
        'disease',
        'disease_stage',
        'cell_line',
        'treatment',
        'race',
        'subject',
        'compound',
        'time',
        'is_processed',
        'is_public'
    )

    def get_queryset(self):
        """
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        # Prefetch everything DetailedSampleSerializer touches to avoid N+1 queries.
        queryset = Sample.public_objects \
            .prefetch_related('organism') \
            .prefetch_related('results') \
            .prefetch_related('results__processor') \
            .prefetch_related('results__computationalresultannotation_set') \
            .prefetch_related('results__computedfile_set') \
            .filter(**self.get_query_params_filters())

        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get('filter_by', None)
        if filter_by:
            # `?filter_by=` does a substring match across all free-text metadata fields.
            queryset = queryset.filter(Q(title__icontains=filter_by) |
                                       Q(sex__icontains=filter_by) |
                                       Q(age__icontains=filter_by) |
                                       Q(specimen_part__icontains=filter_by) |
                                       Q(genotype__icontains=filter_by) |
                                       Q(disease__icontains=filter_by) |
                                       Q(disease_stage__icontains=filter_by) |
                                       Q(cell_line__icontains=filter_by) |
                                       Q(treatment__icontains=filter_by) |
                                       Q(race__icontains=filter_by) |
                                       Q(subject__icontains=filter_by) |
                                       Q(compound__icontains=filter_by) |
                                       Q(time__icontains=filter_by))

        return queryset

    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()

        # `?ids=1,2,3` filters by primary key.
        ids = self.request.query_params.get('ids', None)
        if ids is not None:
            ids = [int(x) for x in ids.split(',')]
            filter_dict['pk__in'] = ids

        # `?experiment_accession_code=` limits results to one experiment (404 if unknown).
        experiment_accession_code = self.request.query_params.get('experiment_accession_code', None)
        if experiment_accession_code:
            experiment = get_object_or_404(Experiment.objects.values('id'), accession_code=experiment_accession_code)
            filter_dict['experiments__in'] = [experiment['id']]

        accession_codes = self.request.query_params.get('accession_codes', None)
        if accession_codes:
            accession_codes = accession_codes.split(',')
            filter_dict['accession_code__in'] = accession_codes

        # `?dataset_id=` limits results to the samples in that dataset.
        # NOTE: this overwrites any `accession_codes` filter set above.
        dataset_id = self.request.query_params.get('dataset_id', None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            filter_dict['accession_code__in'] = [item for sublist in dataset.data.values() for item in sublist]

        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get('organism__name', None)
        if organism_name:
            filter_dict['organism__name'] = organism_name

        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given its accession code """
    # Looked up by accession code (URL kwarg), not primary key.
    lookup_field = "accession_code"
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##
class ProcessorList(generics.ListAPIView):
    """List all processors."""
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##
class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list

    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).

    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """
    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        # A valid, activated API token unlocks the serializer that exposes
        # download URLs; otherwise fall back to the URL-less serializer.
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            # `token` is only an existence probe; the value itself is unused.
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return ComputationalResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer

    def filter_queryset(self, queryset):
        # Treat every remaining query parameter as an exact-match ORM filter.
        # NOTE(review): an unknown parameter name will raise FieldError
        # (HTTP 500) rather than 400 — confirm whether that is intended.
        filter_dict = self.request.query_params.dict()
        filter_dict.pop('limit', None)
        filter_dict.pop('offset', None)
        return queryset.filter(**filter_dict)
##
# Search Filter Models
##
class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """
    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    # Pagination disabled deliberately: the full list is returned in one response.
    paginator = None
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """
    serializer_class = PlatformSerializer
    paginator = None

    def get_queryset(self):
        # Distinct (accession code, name) pairs over all public samples.
        return Sample.public_objects.all().values("platform_accession_code", "platform_name").distinct()
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """
    serializer_class = InstitutionSerializer
    paginator = None

    def get_queryset(self):
        # Distinct submitter institutions over all public experiments.
        return Experiment.public_objects.all().values("submitter_institution").distinct()
##
# Jobs
##
class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """
    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Any serialized field can be used as an exact-match filter parameter.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """
    model = DownloaderJob
    queryset = DownloaderJob.objects.all()
    serializer_class = DownloaderJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Any serialized field can be used as an exact-match filter parameter.
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """
    model = ProcessorJob
    queryset = ProcessorJob.objects.all()
    serializer_class = ProcessorJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Any serialized field can be used as an exact-match filter parameter.
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)
###
# Statistics
###
class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """

    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Return aggregate site stats (cached for 10 minutes).

        Pass `?dummy=1` to skip the expensive database aggregations and get a
        static snapshot (calculated on 09/25/2019) instead.
        """
        # static values for now
        dummy = request.query_params.dict().pop('dummy', None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                'samples_available': 904953 + 391022,
                'total_size_in_bytes': 832195361132962,
                'supported_organisms': 43 + 159,
                'experiments_processed': 35785 + 8661
            }
            return Response(result)

        result = {
            'samples_available': self._get_samples_available(),
            'total_size_in_bytes': OriginalFile.objects.aggregate(total_size=Sum('size_in_bytes'))['total_size'],
            'supported_organisms': self._get_supported_organisms(),
            'experiments_processed': self._get_experiments_processed()
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        # Fixed: was `__gt=1`, which required *more than one* processed sample
        # and contradicted the "at least one" contract stated above.
        experiments_with_sample_processed = Experiment.objects\
            .annotate(
                processed_samples_count=Count('samples', filter=Q(samples__is_processed=True)),
            )\
            .filter(Q(processed_samples_count__gt=0))\
            .count()
        # Experiments whose only usable output is a quant.sf file from an
        # otherwise-unprocessed sample also count as processed.
        experiments_with_sample_quant = ComputedFile.objects\
            .filter(filename='quant.sf', result__samples__is_processed=False)\
            .values_list('result__samples__experiments', flat=True)\
            .distinct()\
            .count()
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        organisms_without_qn_targets = Organism.objects.filter(
            qn_target__isnull=True,
            sample__is_processed=False,
            sample__technology='RNA-SEQ',
            sample__results__computedfile__filename='quant.sf'
        ).distinct().count()
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = Sample.objects.filter(
            is_processed=False,
            technology='RNA-SEQ',
            results__computedfile__filename='quant.sf'
        ).distinct().count()
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """

    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        # Cached for 10 minutes; the aggregations below are expensive.
        range_param = request.query_params.dict().pop('range', None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)

    @classmethod
    def calculate_stats(cls, range_param):
        """Build the full stats payload. When `range_param` is one of
        day/week/month/year, per-interval timelines and data-size totals
        are included as well."""
        data = {}
        data['survey_jobs'] = cls._get_job_stats(SurveyJob.objects, range_param)
        data['downloader_jobs'] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data['processor_jobs'] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data['experiments'] = cls._get_object_stats(Experiment.objects, range_param)

        # processed and unprocessed samples stats
        data['unprocessed_samples'] = cls._get_object_stats(Sample.objects.filter(is_processed=False), range_param, 'last_modified')
        data['processed_samples'] = cls._get_object_stats(Sample.processed_objects, range_param, 'last_modified')
        data['processed_samples']['last_hour'] = cls._samples_processed_last_hour()

        # Break processed samples down by technology (skipping blank values)...
        data['processed_samples']['technology'] = {}
        techs = Sample.processed_objects.values('technology').annotate(count=Count('technology'))
        for tech in techs:
            if not tech['technology'] or not tech['technology'].strip():
                continue
            data['processed_samples']['technology'][tech['technology']] = tech['count']

        # ...and by organism name.
        data['processed_samples']['organism'] = {}
        organisms = Sample.processed_objects.values('organism__name').annotate(count=Count('organism__name'))
        for organism in organisms:
            if not organism['organism__name']:
                continue
            data['processed_samples']['organism'][organism['organism__name']] = organism['count']

        # NOTE(review): unlike the other sections, no `range_param` is passed
        # here, so processed_experiments never gets a timeline — confirm intended.
        data['processed_experiments'] = cls._get_object_stats(Experiment.processed_public_objects)
        data['active_volumes'] = list(get_active_volumes())
        data['dataset'] = cls._get_dataset_stats(range_param)

        if range_param:
            data['input_data_size'] = cls._get_input_data_size()
            data['output_data_size'] = cls._get_output_data_size()

        # Merge in the live Nomad queue breakdown (running/pending jobs etc.).
        data.update(get_nomad_jobs_breakdown())

        return data

    # Dataset owners matching these usernames (or @alexslemonade.org addresses)
    # are internal users and are excluded from the dataset stats.
    EMAIL_USERNAME_BLACKLIST = [
        'arielsvn',
        'cansav09',
        'd.prasad',
        'daniel.himmelstein',
        'dv.prasad991',
        'greenescientist',
        'jaclyn.n.taroni',
        'kurt.wheeler91',
        'michael.zietz',
        'miserlou'
    ]

    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        filter_query = Q()
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith='@alexslemonade.org')
        processed_datasets = Dataset.objects.filter(is_processed=True, email_address__isnull=False).exclude(filter_query)

        result = processed_datasets.aggregate(
            total=Count('id'),
            aggregated_by_experiment=Count('id', filter=Q(aggregate_by='EXPERIMENT')),
            aggregated_by_species=Count('id', filter=Q(aggregate_by='SPECIES')),
            scale_by_none=Count('id', filter=Q(scale_by='NONE')),
            scale_by_minmax=Count('id', filter=Q(scale_by='MINMAX')),
            scale_by_standard=Count('id', filter=Q(scale_by='STANDARD')),
            scale_by_robust=Count('id', filter=Q(scale_by='ROBUST')),
        )

        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result['timeline'] = cls._get_intervals(
                processed_datasets,
                range_param,
                'last_modified'
            ).annotate(
                total=Count('id'),
                total_size=Sum('size_in_bytes')
            )
        return result

    @classmethod
    def _samples_processed_last_hour(cls):
        # Proxy: samples modified within the last hour while in the processed set.
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()

    @classmethod
    def _get_input_data_size(cls):
        """Total bytes of original files belonging to processed samples."""
        total_size = OriginalFile.objects.filter(
            sample__is_processed=True  # <-- SLOW
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_output_data_size(cls):
        """Total bytes of public computed files."""
        # NOTE(review): `s3_key__isnull=True` alongside `s3_bucket__isnull=False`
        # looks inconsistent — files stored in S3 would normally have a key.
        # Confirm whether this should be `s3_key__isnull=False`.
        total_size = ComputedFile.public_objects.all().filter(
            s3_bucket__isnull=False,
            s3_key__isnull=True
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate total/successful/failed/pending/open counts and the average
        runtime of successful jobs; adds a per-interval timeline when
        `range_param` is given."""
        start_filter = Q()
        if range_param:
            current_date = datetime.now(tz=timezone.utc)
            start_date = {
                'day': current_date - timedelta(days=1),
                'week': current_date - timedelta(weeks=1),
                'month': current_date - timedelta(days=30),
                'year': current_date - timedelta(days=365)
            }.get(range_param)
            # Unstarted jobs are always included so "pending" stays accurate.
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)

        result = jobs.filter(start_filter).aggregate(
            total=Count('id'),
            successful=Count('id', filter=Q(success=True)),
            failed=Count('id', filter=Q(success=False)),
            # Jobs created before JOB_CREATED_AT_CUTOFF are excluded from the
            # pending/open buckets (stale pre-cutoff jobs would skew them).
            pending=Count('id', filter=Q(start_time__isnull=True,
                                         success__isnull=True,
                                         created_at__gt=JOB_CREATED_AT_CUTOFF)),
            open=Count('id', filter=Q(start_time__isnull=False,
                                      success__isnull=True,
                                      created_at__gt=JOB_CREATED_AT_CUTOFF)),
        )

        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result['average_time'] = jobs.filter(start_filter).filter(
            start_time__isnull=False,
            end_time__isnull=False,
            success=True
        ).aggregate(
            average_time=Avg(F('end_time') - F('start_time'))
        )['average_time']

        if not result['average_time']:
            result['average_time'] = 0
        else:
            # Serialize the timedelta as seconds.
            result['average_time'] = result['average_time'].total_seconds()

        if range_param:
            result['timeline'] = cls._get_intervals(jobs, range_param) \
                .annotate(
                    total=Count('id'),
                    successful=Count('id', filter=Q(success=True)),
                    failed=Count('id', filter=Q(success=False)),
                    pending=Count('id', filter=Q(start_time__isnull=True, success__isnull=True)),
                    open=Count('id', filter=Q(start_time__isnull=False, success__isnull=True)),
                )

        return result

    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field='created_at'):
        result = {
            'total': objects.count()
        }

        if range_param:
            result['timeline'] = cls._get_intervals(objects, range_param, field)\
                .annotate(total=Count('id'))

        return result

    @classmethod
    def _get_intervals(cls, objects, range_param, field='last_modified'):
        # Interval width per requested range: hourly for a day, daily for a
        # week/month, monthly for a year.
        range_to_trunc = {
            'day': 'hour',
            'week': 'day',
            'month': 'day',
            'year': 'month'
        }

        current_date = datetime.now(tz=timezone.utc)

        range_to_start_date = {
            'day': current_date - timedelta(days=1),
            'week': current_date - timedelta(weeks=1),
            'month': current_date - timedelta(days=30),
            'year': current_date - timedelta(days=365)
        }

        # truncate the `last_modified` field by hour, day or month depending on the `range` param
        # and annotate each object with that. This will allow us to count the number of objects
        # on each interval with a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())) \
                      .values('start') \
                      .filter(start__gte=range_to_start_date.get(range_param))
###
# Transcriptome Indices
###
class TranscriptomeIndexList(generics.ListAPIView):
    """ List all Transcriptome Indices. These are a special type of process result, necessary for processing other SRA samples. """
    serializer_class = OrganismIndexSerializer

    def get_queryset(self):
        # One index per (organism, index_type) combination.
        return OrganismIndex.objects.distinct("organism", "index_type")
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Organism name. Eg. `MUS_MUSCULUS`",
    ),
    openapi.Parameter(
        name='length', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="",
        enum=('short', 'long',),
        default='short'
    ),
]))
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """
    serializer_class = OrganismIndexSerializer

    def get_object(self):
        """Look up the `OrganismIndex` for the URL's organism and the optional
        `?length=` query parameter ("short" by default); raise 404 when the
        organism or index does not exist."""
        # Already upper-cased here, so no further `.upper()` calls are needed.
        organism_name = self.kwargs['organism_name'].upper()
        length = self.request.query_params.get('length', 'short')

        # Get the correct organism index object, serialize it, and return it
        transcription_length = "TRANSCRIPTOME_" + length.upper()
        try:
            organism = Organism.objects.get(name=organism_name)
            organism_index = OrganismIndex.objects.exclude(s3_url__exact="")\
                .distinct("organism", "index_type")\
                .get(organism=organism, index_type=transcription_length)
            return organism_index
        # Fixed: `Organism.DoesNotExist` (raised when the organism name itself
        # is unknown) was previously uncaught, producing a 500 instead of a 404.
        except (Organism.DoesNotExist, OrganismIndex.DoesNotExist):
            raise Http404('Organism does not exist')
###
# Compendia
###
class CompendiaDetail(APIView):
    """
    A very simple modified ComputedFile endpoint which only shows Compendia results.
    """

    # Deprecated in favor of `/computed_files?is_compendia=True&is_public=True`.
    @swagger_auto_schema(deprecated=True)
    def get(self, request, version, format=None):
        computed_files = ComputedFile.objects.filter(is_compendia=True, is_public=True, is_qn_target=False).order_by('-created_at')

        # A valid, activated API token unlocks download URLs in the response;
        # `token` is only an existence probe and otherwise unused.
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            serializer = CompendiaWithUrlSerializer(computed_files, many=True)
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            serializer = CompendiaSerializer(computed_files, many=True)
        return Response(serializer.data)
###
# QN Targets
###
class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """
    serializer_class = OrganismSerializer
    # Unpaginated: the full list is returned in one response.
    paginator = None

    def get_queryset(self):
        return Organism.get_objects_with_qn_targets()
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
    )], responses={404: 'QN Target not found for the given organism.'}))
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """
    serializer_class = QNTargetSerializer

    def get_object(self):
        # Normalize eg "mus musculus" into the canonical "MUS_MUSCULUS" form.
        organism = self.kwargs['organism_name']
        organism = organism.upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(organism).id
            # Most recent QN result annotation for this organism.
            annotation = ComputationalResultAnnotation.objects.filter(
                data__organism_id=organism_id,
                data__is_qn=True
            ).order_by(
                '-created_at'
            ).first()
            qn_target = annotation.result.computedfile_set.first()
        except Exception:
            # Broad on purpose: an unknown organism, or `annotation` being None
            # (AttributeError on `.result`) both mean there is no target to serve.
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##
class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list

    ComputedFiles are representation of files created by data-refinery processes.

    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """
    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Fields usable as exact-match query parameters.
    filterset_fields = (
        'id',
        'is_qn_target',
        'is_smashable',
        'is_qc',
        'is_compendia',
        'quant_sf_only',
        'svd_algorithm',
        'compendia_version',
        'created_at',
        'last_modified',
    )
    ordering_fields = ('id', 'created_at', 'last_modified', 'compendia_version',)
    # Newest first by default.
    ordering = ('-id',)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        # A valid, activated API token is passed through so the serializer can
        # expose download URLs; without one the context is unchanged.
        serializer_context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context
##
# Util
##
def get_client_ip(request):
    """Return the originating client IP for `request`.

    Prefers the first entry of the `X-Forwarded-For` header (set by proxies
    and load balancers); falls back to `REMOTE_ADDR`, defaulting to an empty
    string when neither is present.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR', '')
<|code_end|>
| api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.functions import Trunc
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_TERMS,
LOOKUP_FILTER_RANGE,
LOOKUP_FILTER_PREFIX,
LOOKUP_FILTER_WILDCARD,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
LOOKUP_QUERY_EXCLUDE,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend
)
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from elasticsearch_dsl import TermsFacet, DateHistogramFacet
from rest_framework import status, filters, generics, mixins
from rest_framework.exceptions import APIException, NotFound
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
SampleSerializer,
CompendiaSerializer,
CompendiaWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import (
ExperimentDocument
)
from data_refinery_common.utils import (
get_env_variable,
get_active_volumes,
get_nomad_jobs_breakdown,
)
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
logger = get_and_configure_logger(__name__)

##
# Variables
##

# Jobs created before this date are excluded from the "pending"/"open"
# buckets in Stats._get_job_stats, so stale pre-cutoff jobs don't skew stats.
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
##
# ElasticSearch
##
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    def aggregate(self, request, queryset, view):
        """Extends FacetedSearchFilterBackend to add additional metrics to each bucket
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19

        We have the downloadable sample accession codes indexed for each experiment.
        The cardinality metric, returns the number of unique samples for each bucket.
        However it's just an approximate
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        I used the highest possible precision threshold, but this might increase the amount
        of memory used.
        """
        facets = self.construct_facets(request, view)
        for field, facet in iteritems(facets):
            agg = facet['facet'].get_aggregation()
            # `total_samples` is consumed by ExperimentDocumentView.transform_es_facets.
            queryset.aggs.bucket(field, agg)\
                .metric('total_samples', 'cardinality', field='downloadable_samples', precision_threshold=40000)
        return queryset
##
# ElasticSearch powered Search and Filter
##
@method_decorator(name='list', decorator=swagger_auto_schema(
    manual_parameters=[
        openapi.Parameter(
            name='technology', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
        ),
        openapi.Parameter(
            name='has_publication', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Filter the results that have associated publications with `?has_publication=true`",
        ),
        openapi.Parameter(
            name='platform', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by platform, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='organism', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by organism, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='num_processed_samples', in_=openapi.IN_QUERY,
            type=openapi.TYPE_NUMBER,
            description="Use ElasticSearch queries to specify the number of processed samples of the results",
        ),
    ],
    operation_description="""
Use this endpoint to search among the experiments.

This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)

There's an additional field in the response named `facets` that contain stats on the number of results per filter type.

Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
"""))
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """
    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended
    ]

    # Primitive
    lookup_field = 'id'

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        'title': {'boost': 10},
        'publication_authors': {'boost': 8},  # "People will search themselves"
        'publication_title': {'boost': 5},
        'submitter_institution': {'boost': 3},
        'description': {'boost': 2},
        'accession_code': None,
        'alternate_accession_code': None,
        'publication_doi': None,
        'pubmed_id': None,
        'sample_metadata_fields': None,
        'platform_names': None
    }

    # Define filtering fields
    filter_fields = {
        'id': {
            'field': '_id',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN
            ],
        },
        'technology': 'technology',
        'has_publication': 'has_publication',
        'platform': 'platform_accession_codes',
        'organism': 'organism_names',
        'num_processed_samples': {
            'field': 'num_processed_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        },
        'num_downloadable_samples': {
            'field': 'num_downloadable_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        }
    }

    # Define ordering fields
    ordering_fields = {
        'id': 'id',
        'title': 'title.raw',
        'description': 'description.raw',
        'num_total_samples': 'num_total_samples',
        'num_downloadable_samples': 'num_downloadable_samples',
        'source_first_published': 'source_first_published'
    }

    # Specify default ordering
    ordering = ('_score', '-num_total_samples', 'id', 'title', 'description', '-source_first_published')

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        'technology': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': True  # These are enabled by default, which is more expensive but more simple.
        },
        'organism_names': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': True,
            'options': {
                'size': 999999
            }
        },
        'platform_accession_codes': {
            'field': 'platform_accession_codes',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
            'options': {
                'size': 999999
            }
        },
        'has_publication': {
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        'technology_global': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': False,
            'global': True
        },
        'organism_names_global': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        # NOTE(review): this global facet aggregates on `platform_names` while
        # the enabled platform facet uses `platform_accession_codes` — confirm
        # whether that difference is intentional.
        'platform_names_global': {
            'field': 'platform_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'has_publication_global': {
            # Fixed copy-paste bug: this facet previously aggregated on
            # 'platform_names', which would have produced platform buckets
            # for the publication facet.
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
        },
    }
    faceted_search_param = 'facet'

    def list(self, request, *args, **kwargs):
        """Run the standard DocumentViewSet list, then reshape the raw ES facet
        buckets into the simpler `{field: {key: count}}` form."""
        # Fixed: args/kwargs were previously passed positionally
        # (`list(request, args, kwargs)`) instead of being unpacked.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data['facets'] = self.transform_es_facets(response.data['facets'])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:

        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }

        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet['buckets']:
                # `total_samples` is the cardinality metric added by
                # FacetedSearchFilterBackendExtended.aggregate.
                if field == 'has_publication':
                    filter_group[bucket['key_as_string']] = bucket['total_samples']['value']
                else:
                    filter_group[bucket['key']] = bucket['total_samples']['value']
            result[field] = filter_group
        return result
##
# Dataset
##
class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Datasets. """
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
# Swagger/OpenAPI documentation for the verbs exposed below; PATCH is hidden
# because partial updates are not supported.
@method_decorator(name='get', decorator=swagger_auto_schema(operation_description="View a single Dataset.",manual_parameters=[
    openapi.Parameter(
        name='details', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
    )]))
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None)) # partial updates not supported
@method_decorator(name='put', decorator=swagger_auto_schema(operation_description="""
Modify an existing Dataset.
Set `start` to `true` along with a valid activated API token (from `/token/`) to begin smashing and delivery.
You must also supply `email_address` with `start`, though this will never be serialized back to you.
"""))
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = 'id'

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        # Suppress the Slack download announcement for team members and any
        # @alexslemonade.org address.
        # NOTE(review): this list overlaps with Stats.EMAIL_USERNAME_BLACKLIST;
        # consider sharing a single source of truth.
        return email is not None \
            and email.find("cansav09") != 0 \
            and email.find("arielsvn") != 0 \
            and email.find("jaclyn.n.taroni") != 0 \
            and email.find("kurt.wheeler") != 0 \
            and email.find("greenescientist") != 0 \
            and email.find("@alexslemonade.org") == -1 \
            and email.find("miserlou") != 0 \
            and email.find("michael.zietz@gmail.com") != 0 \
            and email.find("d.prasad") != 0 \
            and email.find("daniel.himmelstein@gmail.com") != 0 \
            and email.find("dv.prasad991@gmail.com") != 0

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the activated APIToken (if a valid one was supplied in the
        `API_KEY` header) so serializers can include S3 download URLs.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data
        qn_organisms = Organism.get_objects_with_qn_targets()
        # We convert 'ALL' into the actual accession codes given
        for key in new_data['data'].keys():
            accessions = new_data['data'][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                # Only processed samples from organisms with QN targets are smashable.
                sample_codes = list(experiment.samples.filter(is_processed=True, organism__in=qn_organisms).values_list('accession_code', flat=True))
                new_data['data'][key] = sample_codes
        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")
        if new_data.get('start'):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get('token_id', None)
            if not token_id:
                token_id = self.request.META.get('HTTP_API_KEY', None)
            try:
                token = APIToken.objects.get(id=token_id, is_activated=True)
            except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")
            supplied_email_address = self.request.data.get('email_address', None)
            email_ccdl_ok = self.request.data.get('email_ccdl_ok', False)
            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()
                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()
                job_sent = False
                obj = serializer.save()
                # Persist email fields before dispatching so the smasher job
                # can deliver to the right address.
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()
                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get('no_send_job', False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass
                if not job_sent:
                    raise APIException("Unable to queue download job. Something has gone"
                                       " wrong and we have been notified about it.")
                serializer.validated_data['is_processing'] = True
                obj = serializer.save()
                # Best-effort Slack announcement for non-internal users; any
                # failure here must never fail the request.
                if settings.RUNNING_IN_CLOUD and settings.ENGAGEMENTBOT_WEBHOOK is not None \
                        and DatasetView._should_display_on_engagement_bot(supplied_email_address):
                    try:
                        try:
                            remote_ip = get_client_ip(self.request)
                            city = requests.get('https://ipapi.co/' + remote_ip + '/json/', timeout=10).json()['city']
                        except Exception:
                            # NOTE(review): if get_client_ip itself raised,
                            # remote_ip is unbound below and the resulting
                            # NameError is swallowed by the outer except.
                            city = "COULD_NOT_DETERMINE"
                        new_user_text = "New user " + supplied_email_address + " from " + city + " [" + remote_ip + "] downloaded a dataset! (" + str(old_object.id) + ")"
                        webhook_url = settings.ENGAGEMENTBOT_WEBHOOK
                        slack_json = {
                            "channel": "ccdl-general", # Move to robots when we get sick of these
                            "username": "EngagementBot",
                            "icon_emoji": ":halal:",
                            "attachments":[
                                { "color": "good",
                                  "text": new_user_text
                                }
                            ]
                        }
                        response = requests.post(
                            webhook_url,
                            json=slack_json,
                            headers={'Content-Type': 'application/json'},
                            timeout=10
                        )
                    except Exception as e:
                        # It doesn't really matter if this didn't work
                        logger.error(e)
                        pass
                return obj
        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data['data'] = old_data
            serializer.validated_data['aggregate_by'] = old_aggregate
        serializer.save()
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create
    There're several endpoints like [/dataset](#tag/dataset) and [/results](#tag/results) that return
    S3 urls where users can download the files we produce, however in order to get those files people
    need to accept our terms of use by creating a token and activating it.
    ```
    POST /token
    PUT /token/{token-id} is_active=True
    ```
    The token id needs to be sent on the `API_KEY` header on http requests.
    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """
    # A token embodies acceptance of the terms of use; it must be activated
    # (see APITokenView) before download URLs are serialized.
    model = APIToken
    serializer_class = APITokenSerializer
# PATCH is hidden from the API docs; clients activate tokens via PUT.
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify API Tokens.
    get:
    Return details about a specific token.
    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """
    model = APIToken
    lookup_field = 'id'
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##
class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """
    model = Experiment
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    # Exact-match filters exposed as query parameters (e.g. ?accession_code=...).
    filterset_fields = (
        'title',
        'description',
        'accession_code',
        'alternate_accession_code',
        'source_database',
        'source_url',
        'has_publication',
        'publication_title',
        'publication_doi',
        'pubmed_id',
        'organisms',
        'submitter_institution',
        'created_at',
        'last_modified',
        'source_first_published',
        'source_last_modified',
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given its accession code """
    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='dataset_id', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns samples that are added to a dataset.",
    ),
    openapi.Parameter(
        name='experiment_accession_code', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns only the samples associated with an experiment accession code.",
    ),
    openapi.Parameter(
        name='accession_codes', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
    ),
]))
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """
    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = '__all__'
    # Bug fix: this was `('-is_processed')` — a plain string due to the missing
    # comma. DRF accepts either form, but the one-tuple states the intent.
    ordering = ('-is_processed',)
    filterset_fields = (
        'title',
        'organism',
        'source_database',
        'source_archive_url',
        'has_raw',
        'platform_name',
        'technology',
        'manufacturer',
        'sex',
        'age',
        'specimen_part',
        'genotype',
        'disease',
        'disease_stage',
        'cell_line',
        'treatment',
        'race',
        'subject',
        'compound',
        'time',
        'is_processed',
        'is_public'
    )

    def get_queryset(self):
        """
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        # Prefetch everything DetailedSampleSerializer touches to avoid N+1 queries.
        queryset = Sample.public_objects \
            .prefetch_related('organism') \
            .prefetch_related('results') \
            .prefetch_related('results__processor') \
            .prefetch_related('results__computationalresultannotation_set') \
            .prefetch_related('results__computedfile_set') \
            .filter(**self.get_query_params_filters())
        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get('filter_by', None)
        if filter_by:
            queryset = queryset.filter(Q(title__icontains=filter_by) |
                                       Q(sex__icontains=filter_by) |
                                       Q(age__icontains=filter_by) |
                                       Q(specimen_part__icontains=filter_by) |
                                       Q(genotype__icontains=filter_by) |
                                       Q(disease__icontains=filter_by) |
                                       Q(disease_stage__icontains=filter_by) |
                                       Q(cell_line__icontains=filter_by) |
                                       Q(treatment__icontains=filter_by) |
                                       Q(race__icontains=filter_by) |
                                       Q(subject__icontains=filter_by) |
                                       Q(compound__icontains=filter_by) |
                                       Q(time__icontains=filter_by))
        return queryset

    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()
        ids = self.request.query_params.get('ids', None)
        if ids is not None:
            ids = [int(x) for x in ids.split(',')]
            filter_dict['pk__in'] = ids
        experiment_accession_code = self.request.query_params.get('experiment_accession_code', None)
        if experiment_accession_code:
            experiment = get_object_or_404(Experiment.objects.values('id'), accession_code=experiment_accession_code)
            filter_dict['experiments__in'] = [experiment['id']]
        accession_codes = self.request.query_params.get('accession_codes', None)
        if accession_codes:
            accession_codes = accession_codes.split(',')
            filter_dict['accession_code__in'] = accession_codes
        dataset_id = self.request.query_params.get('dataset_id', None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Flatten {experiment: [sample, ...]} into one accession list.
            # NOTE(review): this silently overwrites any `accession_codes`
            # filter set above when both parameters are supplied.
            filter_dict['accession_code__in'] = [item for sublist in dataset.data.values() for item in sublist]
        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get('organism__name', None)
        if organism_name:
            filter_dict['organism__name'] = organism_name
        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given its accession code """
    lookup_field = "accession_code"
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##
class ProcessorList(generics.ListAPIView):
    """List all processors."""
    # Unfiltered, paginated list of every Processor record.
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##
class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list
    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).
    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """
    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        """Use the URL-bearing serializer only when an activated token is supplied."""
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            # Only the token's existence matters; the instance was previously
            # assigned to an unused local. .get() raises on missing/invalid ids.
            APIToken.objects.get(id=token_id, is_activated=True)
            return ComputationalResultWithUrlSerializer
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer

    def filter_queryset(self, queryset):
        """Apply every remaining query parameter directly as an ORM filter."""
        filter_dict = self.request.query_params.dict()
        # Pagination parameters belong to the paginator, not the ORM.
        filter_dict.pop('limit', None)
        filter_dict.pop('offset', None)
        # NOTE(review): unknown parameter names raise FieldError (HTTP 500);
        # consider whitelisting the filterable fields.
        return queryset.filter(**filter_dict)
##
# Search Filter Models
##
class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """
    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    # Disabling the paginator returns the full list in one response.
    paginator = None
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """
    serializer_class = PlatformSerializer
    paginator = None

    def get_queryset(self):
        # Distinct (accession code, name) pairs across all public samples.
        fields = ("platform_accession_code", "platform_name")
        return Sample.public_objects.all().values(*fields).distinct()
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """
    serializer_class = InstitutionSerializer
    paginator = None

    def get_queryset(self):
        # One row per distinct submitting institution.
        public_experiments = Experiment.public_objects.all()
        return public_experiments.values("submitter_institution").distinct()
##
# Jobs
##
class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """
    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable; newest jobs first by default.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """
    model = DownloaderJob
    queryset = DownloaderJob.objects.all()
    serializer_class = DownloaderJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable; newest jobs first by default.
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """
    model = ProcessorJob
    queryset = ProcessorJob.objects.all()
    serializer_class = ProcessorJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable; newest jobs first by default.
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)
###
# Statistics
###
class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """

    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Return the four headline numbers; `?dummy=1` serves canned values."""
        # static values for now
        dummy = request.query_params.dict().pop('dummy', None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                'samples_available': 904953 + 391022,
                'total_size_in_bytes': 832195361132962,
                'supported_organisms': 43 + 159,
                'experiments_processed': 35785 + 8661
            }
            return Response(result)
        result = {
            'samples_available': self._get_samples_available(),
            'total_size_in_bytes': OriginalFile.objects.aggregate(total_size=Sum('size_in_bytes'))['total_size'],
            'supported_organisms': self._get_supported_organisms(),
            'experiments_processed': self._get_experiments_processed()
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        # Bug fix: the filter used `__gt=1`, which excluded experiments with
        # exactly one processed sample, contradicting the docstring above.
        experiments_with_sample_processed = Experiment.objects\
            .annotate(
                processed_samples_count=Count('samples', filter=Q(samples__is_processed=True)),
            )\
            .filter(Q(processed_samples_count__gt=0))\
            .count()
        # Experiments whose only usable data is quant.sf files on unprocessed samples.
        experiments_with_sample_quant = ComputedFile.objects\
            .filter(filename='quant.sf', result__samples__is_processed=False)\
            .values_list('result__samples__experiments', flat=True)\
            .distinct()\
            .count()
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        organisms_without_qn_targets = Organism.objects.filter(
            qn_target__isnull=True,
            sample__is_processed=False,
            sample__technology='RNA-SEQ',
            sample__results__computedfile__filename='quant.sf'
        ).distinct().count()
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = Sample.objects.filter(
            is_processed=False,
            technology='RNA-SEQ',
            results__computedfile__filename='quant.sf'
        ).distinct().count()
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """

    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        # Responses are cached for 10 minutes (decorator above) because the
        # aggregations below are expensive.
        range_param = request.query_params.dict().pop('range', None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)

    @classmethod
    def calculate_stats(cls, range_param):
        """Build the full stats payload.

        `range_param` ('day'/'week'/'month'/'year' or None) restricts job
        stats to that window and adds `timeline` breakdowns plus data sizes.
        """
        data = {}
        data['generated_on'] = timezone.now()
        data['survey_jobs'] = cls._get_job_stats(SurveyJob.objects, range_param)
        data['downloader_jobs'] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data['processor_jobs'] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data['experiments'] = cls._get_object_stats(Experiment.objects, range_param)
        # processed and unprocessed samples stats
        data['unprocessed_samples'] = cls._get_object_stats(Sample.objects.filter(is_processed=False), range_param, 'last_modified')
        data['processed_samples'] = cls._get_object_stats(Sample.processed_objects, range_param, 'last_modified')
        data['processed_samples']['last_hour'] = cls._samples_processed_last_hour()
        # Per-technology counts, skipping empty/whitespace-only names.
        data['processed_samples']['technology'] = {}
        techs = Sample.processed_objects.values('technology').annotate(count=Count('technology'))
        for tech in techs:
            if not tech['technology'] or not tech['technology'].strip():
                continue
            data['processed_samples']['technology'][tech['technology']] = tech['count']
        # Per-organism counts, skipping samples with no organism name.
        data['processed_samples']['organism'] = {}
        organisms = Sample.processed_objects.values('organism__name').annotate(count=Count('organism__name'))
        for organism in organisms:
            if not organism['organism__name']:
                continue
            data['processed_samples']['organism'][organism['organism__name']] = organism['count']
        data['processed_experiments'] = cls._get_object_stats(Experiment.processed_public_objects)
        data['active_volumes'] = list(get_active_volumes())
        data['dataset'] = cls._get_dataset_stats(range_param)
        if range_param:
            data['input_data_size'] = cls._get_input_data_size()
            data['output_data_size'] = cls._get_output_data_size()
        # Merge in the Nomad queued/running job breakdown.
        data.update(get_nomad_jobs_breakdown())
        return data

    # Email usernames whose datasets are excluded from download stats (team
    # members); see also DatasetView._should_display_on_engagement_bot.
    EMAIL_USERNAME_BLACKLIST = [
        'arielsvn',
        'cansav09',
        'd.prasad',
        'daniel.himmelstein',
        'dv.prasad991',
        'greenescientist',
        'jaclyn.n.taroni',
        'kurt.wheeler91',
        'michael.zietz',
        'miserlou'
    ]

    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        # Exclude datasets downloaded by team members / internal addresses.
        filter_query = Q()
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith='@alexslemonade.org')
        processed_datasets = Dataset.objects.filter(is_processed=True, email_address__isnull=False).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count('id'),
            aggregated_by_experiment=Count('id', filter=Q(aggregate_by='EXPERIMENT')),
            aggregated_by_species=Count('id', filter=Q(aggregate_by='SPECIES')),
            scale_by_none=Count('id', filter=Q(scale_by='NONE')),
            scale_by_minmax=Count('id', filter=Q(scale_by='MINMAX')),
            scale_by_standard=Count('id', filter=Q(scale_by='STANDARD')),
            scale_by_robust=Count('id', filter=Q(scale_by='ROBUST')),
        )
        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result['timeline'] = cls._get_intervals(
                processed_datasets,
                range_param,
                'last_modified'
            ).annotate(
                total=Count('id'),
                total_size=Sum('size_in_bytes')
            )
        return result

    @classmethod
    def _samples_processed_last_hour(cls):
        # Count of processed samples modified within the past hour.
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()

    @classmethod
    def _get_input_data_size(cls):
        # Total bytes of original files belonging to processed samples.
        total_size = OriginalFile.objects.filter(
            sample__is_processed=True # <-- SLOW
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_output_data_size(cls):
        # Total bytes of public computed files.
        # NOTE(review): `s3_key__isnull=True` looks inconsistent with
        # `s3_bucket__isnull=False` — confirm whether it should be False too.
        total_size = ComputedFile.public_objects.all().filter(
            s3_bucket__isnull=False,
            s3_key__isnull=True
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate total/success/failure/pending/open counts plus average
        runtime for a job queryset, optionally restricted by `range_param`."""
        start_filter = Q()
        if range_param:
            current_date = datetime.now(tz=timezone.utc)
            start_date = {
                'day': current_date - timedelta(days=1),
                'week': current_date - timedelta(weeks=1),
                'month': current_date - timedelta(days=30),
                'year': current_date - timedelta(days=365)
            }.get(range_param)
            # Jobs that haven't started yet are always included in the window.
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)
        result = jobs.filter(start_filter).aggregate(
            total=Count('id'),
            successful=Count('id', filter=Q(success=True)),
            failed=Count('id', filter=Q(success=False)),
            # pending/open only count jobs newer than the cutoff so ancient
            # unfinished jobs don't skew the dashboard.
            pending=Count('id', filter=Q(start_time__isnull=True,
                                         success__isnull=True,
                                         created_at__gt=JOB_CREATED_AT_CUTOFF)),
            open=Count('id', filter=Q(start_time__isnull=False,
                                      success__isnull=True,
                                      created_at__gt=JOB_CREATED_AT_CUTOFF)),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result['average_time'] = jobs.filter(start_filter).filter(
            start_time__isnull=False,
            end_time__isnull=False,
            success=True
        ).aggregate(
            average_time=Avg(F('end_time') - F('start_time'))
        )['average_time']
        # Serialize the timedelta as seconds (0 when no jobs matched).
        if not result['average_time']:
            result['average_time'] = 0
        else:
            result['average_time'] = result['average_time'].total_seconds()
        if range_param:
            result['timeline'] = cls._get_intervals(jobs, range_param) \
                .annotate(
                    total=Count('id'),
                    successful=Count('id', filter=Q(success=True)),
                    failed=Count('id', filter=Q(success=False)),
                    pending=Count('id', filter=Q(start_time__isnull=True, success__isnull=True)),
                    open=Count('id', filter=Q(start_time__isnull=False, success__isnull=True)),
                )
        return result

    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field='created_at'):
        """Total count for a queryset; with `range_param`, also a `timeline`
        of per-interval counts bucketed on `field`."""
        result = {
            'total': objects.count()
        }
        if range_param:
            result['timeline'] = cls._get_intervals(objects, range_param, field)\
                .annotate(total=Count('id'))
        return result

    @classmethod
    def _get_intervals(cls, objects, range_param, field='last_modified'):
        """Bucket `objects` by truncating `field` to the interval implied by
        `range_param` (day->hour, week/month->day, year->month)."""
        range_to_trunc = {
            'day': 'hour',
            'week': 'day',
            'month': 'day',
            'year': 'month'
        }
        current_date = datetime.now(tz=timezone.utc)
        range_to_start_date = {
            'day': current_date - timedelta(days=1),
            'week': current_date - timedelta(weeks=1),
            'month': current_date - timedelta(days=30),
            'year': current_date - timedelta(days=365)
        }
        # truncate the `last_modified` field by hour, day or month depending on the `range` param
        # and annotate each object with that. This will allow us to count the number of objects
        # on each interval with a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())) \
            .values('start') \
            .filter(start__gte=range_to_start_date.get(range_param))
###
# Transcriptome Indices
###
class TranscriptomeIndexList(generics.ListAPIView):
    """ List all Transcriptome Indices. These are a special type of process result, necessary for processing other SRA samples. """
    serializer_class = OrganismIndexSerializer
    def get_queryset(self):
        # One index per distinct (organism, index_type) pair.
        return OrganismIndex.objects.distinct("organism", "index_type")
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Organism name. Eg. `MUS_MUSCULUS`",
    ),
    openapi.Parameter(
        name='length', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="",
        enum=('short', 'long',),
        default='short'
    ),
]))
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """
    serializer_class = OrganismIndexSerializer

    def get_object(self):
        """Look up the index for the URL's organism and the ?length param (404 if absent)."""
        organism_name = self.kwargs['organism_name'].upper()
        length = self.request.query_params.get('length', 'short')
        # Get the correct organism index object, serialize it, and return it
        transcription_length = "TRANSCRIPTOME_" + length.upper()
        try:
            # organism_name is already upper-cased above; the original
            # redundantly upper-cased it a second time here.
            organism = Organism.objects.get(name=organism_name)
            organism_index = OrganismIndex.objects.exclude(s3_url__exact="")\
                .distinct("organism", "index_type")\
                .get(organism=organism, index_type=transcription_length)
            return organism_index
        # Bug fix: a missing Organism previously raised an uncaught
        # Organism.DoesNotExist (HTTP 500); treat it as a 404 as well.
        # Also fixes the message grammar ("does not exists").
        except (Organism.DoesNotExist, OrganismIndex.DoesNotExist):
            raise Http404('Organism does not exist')
###
# Compendia
###
class CompendiaDetail(APIView):
    """
    A very simple modified ComputedFile endpoint which only shows Compendia results.
    """
    @swagger_auto_schema(deprecated=True)
    def get(self, request, version, format=None):
        """List public compendia files, newest first; include download URLs
        only when an activated API token is sent in the `API_KEY` header."""
        computed_files = ComputedFile.objects.filter(is_compendia=True, is_public=True, is_qn_target=False).order_by('-created_at')
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            # Only the token's existence matters; the instance was previously
            # assigned to an unused local. .get() raises on missing/invalid ids.
            APIToken.objects.get(id=token_id, is_activated=True)
            serializer = CompendiaWithUrlSerializer(computed_files, many=True)
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            serializer = CompendiaSerializer(computed_files, many=True)
        return Response(serializer.data)
###
# QN Targets
###
class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """
    serializer_class = OrganismSerializer
    # Unpaginated: the set of QN-target organisms is small.
    paginator = None
    def get_queryset(self):
        return Organism.get_objects_with_qn_targets()
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
    )], responses={404: 'QN Target not found for the given organism.'}))
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """
    serializer_class = QNTargetSerializer

    def get_object(self):
        """Return the newest QN-target ComputedFile for the organism, or 404."""
        organism = self.kwargs['organism_name']
        # Normalize "homo sapiens"-style input to HOMO_SAPIENS.
        organism = organism.upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(organism).id
            # Most recent annotation flagged as a QN target for this organism.
            annotation = ComputationalResultAnnotation.objects.filter(
                data__organism_id=organism_id,
                data__is_qn=True
            ).order_by(
                '-created_at'
            ).first()
            qn_target = annotation.result.computedfile_set.first()
        # Deliberately broad: an unknown organism or a missing annotation
        # (AttributeError on None above) both mean "no target".
        except Exception:
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##
class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list
    ComputedFiles are representation of files created by data-refinery processes.
    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """
    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Exact-match filters exposed as query parameters.
    filterset_fields = (
        'id',
        'is_qn_target',
        'is_smashable',
        'is_qc',
        'is_compendia',
        'quant_sf_only',
        'svd_algorithm',
        'compendia_version',
        'created_at',
        'last_modified',
    )
    ordering_fields = ('id', 'created_at', 'last_modified', 'compendia_version',)
    ordering = ('-id',)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the activated APIToken (from the `API_KEY` header) so the
        serializer can include S3 download URLs.
        """
        serializer_context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context
##
# Util
##
def get_client_ip(request):
    """Best-effort client IP: the first X-Forwarded-For hop when present,
    otherwise REMOTE_ADDR (empty string if neither header exists)."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        # The left-most entry is the originating client.
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR', '')
<|code_end|>
|
Improve docs and make deploy.sh easier to invoke correctly
## Issue Number
There isn't an existing issue specific to the documentation improvements this introduces.
## Purpose/Implementation Notes
Our README had fallen behind. This brings it up to date and makes the deploy process easier to follow.
## Types of changes
- New feature (non-breaking change which adds functionality)
- Documentation
## Functional tests
None yet. This should be tested by spinning up a stack and destroying it.
## Checklist
- [x] Lint and unit tests pass locally with my changes
- [x] I have added tests that prove my fix is effective or that my feature works
- [x] I have added necessary documentation (if appropriate)
- [x] Any dependent changes have been merged and published in downstream modules
| api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.functions import Trunc
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_TERMS,
LOOKUP_FILTER_RANGE,
LOOKUP_FILTER_PREFIX,
LOOKUP_FILTER_WILDCARD,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
LOOKUP_QUERY_EXCLUDE,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend
)
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from elasticsearch_dsl import TermsFacet, DateHistogramFacet
from rest_framework import status, filters, generics, mixins
from rest_framework.exceptions import APIException, NotFound
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
SampleSerializer,
CompendiaSerializer,
CompendiaWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import (
ExperimentDocument
)
from data_refinery_common.utils import (
get_env_variable,
get_active_volumes,
get_nomad_jobs_breakdown,
)
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
logger = get_and_configure_logger(__name__)
##
# Variables
##
# Jobs created before this date are excluded from the "pending"/"open"
# counts in the stats views below (see _get_job_stats); presumably earlier
# records are unreliable -- confirm before changing.
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
##
# ElasticSearch
##
# ElasticSearch-specific imports used by the search views below.
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    def aggregate(self, request, queryset, view):
        """Attach a per-bucket unique-sample count to every facet aggregation.

        Overrides FacetedSearchFilterBackend.aggregate:
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19

        Each experiment document indexes its downloadable sample accession
        codes, so a `cardinality` metric over `downloadable_samples` gives the
        number of unique samples in each bucket. Cardinality is approximate:
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        The maximum precision threshold (40000) is used, trading memory for
        accuracy.
        """
        facets = self.construct_facets(request, view)
        for facet_name, facet_config in iteritems(facets):
            aggregation = facet_config['facet'].get_aggregation()
            bucket = queryset.aggs.bucket(facet_name, aggregation)
            bucket.metric('total_samples',
                          'cardinality',
                          field='downloadable_samples',
                          precision_threshold=40000)
        return queryset
##
# ElasticSearch powered Search and Filter
##
@method_decorator(name='list', decorator=swagger_auto_schema(
    manual_parameters=[
        openapi.Parameter(
            name='technology', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
        ),
        openapi.Parameter(
            name='has_publication', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Filter the results that have associated publications with `?has_publication=true`",
        ),
        openapi.Parameter(
            name='platform', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by platform, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='organism', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by organism, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='num_processed_samples', in_=openapi.IN_QUERY,
            type=openapi.TYPE_NUMBER,
            description="Use ElasticSearch queries to specify the number of processed samples of the results",
        ),
    ],
    operation_description="""
Use this endpoint to search among the experiments.
This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)
There's an additional field in the response named `facets` that contain stats on the number of results per filter type.
Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
"""))
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """
    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended
    ]

    # Primitive
    lookup_field = 'id'

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        'title': {'boost': 10},
        'publication_authors': {'boost': 8},  # "People will search themselves"
        'publication_title': {'boost': 5},
        'submitter_institution': {'boost': 3},
        'description': {'boost': 2},
        'accession_code': None,
        'alternate_accession_code': None,
        'publication_doi': None,
        'pubmed_id': None,
        'sample_metadata_fields': None,
        'platform_names': None
    }

    # Define filtering fields
    filter_fields = {
        'id': {
            'field': '_id',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN
            ],
        },
        'technology': 'technology',
        'has_publication': 'has_publication',
        'platform': 'platform_accession_codes',
        'organism': 'organism_names',
        'num_processed_samples': {
            'field': 'num_processed_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        },
        'num_downloadable_samples': {
            'field': 'num_downloadable_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        }
    }

    # Define ordering fields
    ordering_fields = {
        'id': 'id',
        'title': 'title.raw',
        'description': 'description.raw',
        'num_total_samples': 'num_total_samples',
        'num_downloadable_samples': 'num_downloadable_samples',
        'source_first_published': 'source_first_published'
    }

    # Specify default ordering
    ordering = ('_score', '-num_total_samples', 'id', 'title', 'description', '-source_first_published')

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        'technology': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': True  # These are enabled by default, which is more expensive but more simple.
        },
        'organism_names': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': True,
            'options': {
                'size': 999999
            }
        },
        'platform_accession_codes': {
            'field': 'platform_accession_codes',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
            'options': {
                'size': 999999
            }
        },
        'has_publication': {
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        'technology_global': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': False,
            'global': True
        },
        'organism_names_global': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'platform_names_global': {
            'field': 'platform_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'has_publication_global': {
            # BUGFIX: this facet previously aggregated on 'platform_names',
            # an apparent copy-paste from the entry above, so the global
            # has_publication facet returned platform buckets.
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
        },
    }
    faceted_search_param = 'facet'

    def list(self, request, *args, **kwargs):
        """Run the regular list flow, then flatten the ES facets for the frontend."""
        # BUGFIX: previously forwarded as `.list(request, args, kwargs)`,
        # which passed the args tuple and kwargs dict as two positional
        # arguments instead of unpacking them.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data['facets'] = self.transform_es_facets(response.data['facets'])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:
        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }
        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet['buckets']:
                # Boolean facets expose their key as 'key_as_string'.
                if field == 'has_publication':
                    filter_group[bucket['key_as_string']] = bucket['total_samples']['value']
                else:
                    filter_group[bucket['key']] = bucket['total_samples']['value']
            result[field] = filter_group
        return result
##
# Dataset
##
class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Datasets. """
    # NOTE: the docstring above is surfaced by drf-yasg as the endpoint
    # description, so it is user-facing text.
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
@method_decorator(name='get', decorator=swagger_auto_schema(operation_description="View a single Dataset.", manual_parameters=[
    openapi.Parameter(
        name='details', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
    )]))
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))  # partial updates not supported
@method_decorator(name='put', decorator=swagger_auto_schema(operation_description="""
Modify an existing Dataset.
Set `start` to `true` along with a valid activated API token (from `/token/`) to begin smashing and delivery.
You must also supply `email_address` with `start`, though this will never be serialized back to you.
"""))
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = 'id'

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        """True unless the email matches a known internal/tester address prefix."""
        return email is not None \
            and email.find("cansav09") != 0 \
            and email.find("arielsvn") != 0 \
            and email.find("jaclyn.n.taroni") != 0 \
            and email.find("kurt.wheeler") != 0 \
            and email.find("greenescientist") != 0 \
            and email.find("@alexslemonade.org") == -1 \
            and email.find("miserlou") != 0 \
            and email.find("michael.zietz@gmail.com") != 0 \
            and email.find("d.prasad") != 0 \
            and email.find("daniel.himmelstein@gmail.com") != 0 \
            and email.find("dv.prasad991@gmail.com") != 0

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        Adds the activated APIToken (from the API_KEY header) when present.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data
        qn_organisms = Organism.get_objects_with_qn_targets()
        # We convert 'ALL' into the actual accession codes given
        for key in new_data['data'].keys():
            accessions = new_data['data'][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(experiment.samples.filter(
                    is_processed=True,
                    organism__in=qn_organisms
                ).values_list('accession_code', flat=True))
                new_data['data'][key] = sample_codes
        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")
        if new_data.get('start'):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get('token_id', None)
            if not token_id:
                token_id = self.request.META.get('HTTP_API_KEY', None)
            try:
                APIToken.objects.get(id=token_id, is_activated=True)
            except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")
            supplied_email_address = self.request.data.get('email_address', None)
            email_ccdl_ok = self.request.data.get('email_ccdl_ok', False)
            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()
                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()
                job_sent = False
                obj = serializer.save()
                # Persist the email fields before dispatch so the smasher
                # has a delivery address.
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()
                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get('no_send_job', False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass
                if not job_sent:
                    raise APIException("Unable to queue download job. Something has gone"
                                       " wrong and we have been notified about it.")
                serializer.validated_data['is_processing'] = True
                obj = serializer.save()
                if settings.RUNNING_IN_CLOUD and settings.ENGAGEMENTBOT_WEBHOOK is not None \
                        and DatasetView._should_display_on_engagement_bot(supplied_email_address):
                    self._notify_engagementbot(supplied_email_address, old_object.id)
                return obj
        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data['data'] = old_data
            serializer.validated_data['aggregate_by'] = old_aggregate
        serializer.save()

    def _notify_engagementbot(self, supplied_email_address, dataset_id):
        """Post a best-effort Slack message about a new dataset download.

        Any failure is logged and swallowed -- notification must never
        break the request.
        """
        try:
            # BUGFIX: `remote_ip` used to be assigned only inside the inner
            # try, so a failure in get_client_ip() left it undefined and the
            # NameError below silently suppressed the whole notification.
            # Default both values up front instead.
            remote_ip = "COULD_NOT_DETERMINE"
            city = "COULD_NOT_DETERMINE"
            try:
                remote_ip = get_client_ip(self.request)
                city = requests.get('https://ipapi.co/' + remote_ip + '/json/', timeout=10).json()['city']
            except Exception:
                pass
            new_user_text = "New user " + supplied_email_address + " from " + city + " [" + remote_ip + "] downloaded a dataset! (" + str(dataset_id) + ")"
            webhook_url = settings.ENGAGEMENTBOT_WEBHOOK
            slack_json = {
                "channel": "ccdl-general",  # Move to robots when we get sick of these
                "username": "EngagementBot",
                "icon_emoji": ":halal:",
                "attachments": [
                    {"color": "good",
                     "text": new_user_text
                     }
                ]
            }
            requests.post(
                webhook_url,
                json=slack_json,
                headers={'Content-Type': 'application/json'},
                timeout=10
            )
        except Exception as e:
            # It doesn't really matter if this didn't work
            logger.error(e)
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create
    There're several endpoints like [/dataset](#tag/dataset) and [/results](#tag/results) that return
    S3 urls where users can download the files we produce, however in order to get those files people
    need to accept our terms of use by creating a token and activating it.
    ```
    POST /token
    PUT /token/{token-id} is_active=True
    ```
    The token id needs to be sent on the `API_KEY` header on http requests.
    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """
    # NOTE(review): `model` looks unused by modern DRF generics (the
    # serializer drives creation) -- confirm before removing.
    model = APIToken
    serializer_class = APITokenSerializer
# `auto_schema=None` hides the PATCH operation from the generated API docs.
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify Api Tokens.
    get:
    Return details about a specific token.
    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """
    # NOTE(review): `model` looks unused by modern DRF generics -- confirm.
    model = APIToken
    # Tokens are addressed by their `id` field.
    lookup_field = 'id'
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##
class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """
    model = Experiment
    # Only publicly-visible experiments are listed.
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    # Fields usable as exact-match query parameters, e.g. `?accession_code=...`.
    filterset_fields = (
        'title',
        'description',
        'accession_code',
        'alternate_accession_code',
        'source_database',
        'source_url',
        'has_publication',
        'publication_title',
        'publication_doi',
        'pubmed_id',
        'organisms',
        'submitter_institution',
        'created_at',
        'last_modified',
        'source_first_published',
        'source_last_modified',
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given it's accession code """
    # Lookup is by accession_code rather than the default numeric pk.
    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='dataset_id', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns samples that are added to a dataset.",
    ),
    openapi.Parameter(
        name='experiment_accession_code', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns only the samples associated with an experiment accession code.",
    ),
    openapi.Parameter(
        name='accession_codes', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
    ),
]))
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """
    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = '__all__'
    # BUGFIX: this was `('-is_processed')` -- a bare string, not a tuple
    # (missing trailing comma). DRF happens to accept a string here, but an
    # explicit tuple matches the other list views and avoids the pitfall.
    ordering = ('-is_processed',)
    # Fields usable as exact-match query parameters.
    filterset_fields = (
        'title',
        'organism',
        'source_database',
        'source_archive_url',
        'has_raw',
        'platform_name',
        'technology',
        'manufacturer',
        'sex',
        'age',
        'specimen_part',
        'genotype',
        'disease',
        'disease_stage',
        'cell_line',
        'treatment',
        'race',
        'subject',
        'compound',
        'time',
        'is_processed',
        'is_public'
    )

    def get_queryset(self):
        """
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        queryset = Sample.public_objects \
            .prefetch_related('organism') \
            .prefetch_related('results') \
            .prefetch_related('results__processor') \
            .prefetch_related('results__computationalresultannotation_set') \
            .prefetch_related('results__computedfile_set') \
            .filter(**self.get_query_params_filters())
        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get('filter_by', None)
        if filter_by:
            queryset = queryset.filter(Q(title__icontains=filter_by) |
                                       Q(sex__icontains=filter_by) |
                                       Q(age__icontains=filter_by) |
                                       Q(specimen_part__icontains=filter_by) |
                                       Q(genotype__icontains=filter_by) |
                                       Q(disease__icontains=filter_by) |
                                       Q(disease_stage__icontains=filter_by) |
                                       Q(cell_line__icontains=filter_by) |
                                       Q(treatment__icontains=filter_by) |
                                       Q(race__icontains=filter_by) |
                                       Q(subject__icontains=filter_by) |
                                       Q(compound__icontains=filter_by) |
                                       Q(time__icontains=filter_by))
        return queryset

    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()
        ids = self.request.query_params.get('ids', None)
        if ids is not None:
            ids = [int(x) for x in ids.split(',')]
            filter_dict['pk__in'] = ids
        experiment_accession_code = self.request.query_params.get('experiment_accession_code', None)
        if experiment_accession_code:
            experiment = get_object_or_404(Experiment.objects.values('id'), accession_code=experiment_accession_code)
            filter_dict['experiments__in'] = [experiment['id']]
        accession_codes = self.request.query_params.get('accession_codes', None)
        if accession_codes:
            accession_codes = accession_codes.split(',')
            filter_dict['accession_code__in'] = accession_codes
        dataset_id = self.request.query_params.get('dataset_id', None)
        if dataset_id:
            # NOTE: when both are supplied this silently overwrites the
            # `accession_codes` filter set above.
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            filter_dict['accession_code__in'] = [item for sublist in dataset.data.values() for item in sublist]
        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get('organism__name', None)
        if organism_name:
            filter_dict['organism__name'] = organism_name
        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given it's accession code """
    # Lookup is by accession_code rather than the default numeric pk.
    lookup_field = "accession_code"
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##
class ProcessorList(generics.ListAPIView):
    """List all processors."""
    # No extra filter backends or ordering are configured on this view.
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##
class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list
    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).
    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """
    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        # With an activated API token the serializer also exposes download URLs.
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return ComputationalResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer

    def filter_queryset(self, queryset):
        """Treat every remaining query parameter as an exact-match ORM filter."""
        from django.core.exceptions import FieldError
        filter_dict = self.request.query_params.dict()
        filter_dict.pop('limit', None)   # consumed by pagination
        filter_dict.pop('offset', None)  # consumed by pagination
        try:
            return queryset.filter(**filter_dict)
        except FieldError as e:
            # BUGFIX: an unknown filter field used to propagate as a 500;
            # report it as a 400 validation error instead.
            raise ValidationError(str(e))
##
# Search Filter Models
##
class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """
    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    # Pagination disabled: the full list is returned in one response.
    paginator = None
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """
    serializer_class = PlatformSerializer
    # Pagination disabled: the full list is returned in one response.
    paginator = None

    def get_queryset(self):
        # One row per distinct (accession code, name) platform pair seen on
        # any public sample.
        platform_fields = ("platform_accession_code", "platform_name")
        return Sample.public_objects.all().values(*platform_fields).distinct()
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """
    serializer_class = InstitutionSerializer
    # Pagination disabled: the full list is returned in one response.
    paginator = None

    def get_queryset(self):
        # Distinct submitter institutions across all public experiments.
        public_experiments = Experiment.public_objects.all()
        return public_experiments.values("submitter_institution").distinct()
##
# Jobs
##
class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """
    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field doubles as an exact-match filter parameter.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """
    model = DownloaderJob
    queryset = DownloaderJob.objects.all()
    serializer_class = DownloaderJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field doubles as an exact-match filter parameter.
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """
    model = ProcessorJob
    queryset = ProcessorJob.objects.all()
    serializer_class = ProcessorJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field doubles as an exact-match filter parameter
    # (e.g. `?nomad_job_id=...` to look a job up by its Nomad id).
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)
###
# Statistics
###
class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """

    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        # static values for now
        dummy = request.query_params.dict().pop('dummy', None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                'samples_available': 904953 + 391022,
                'total_size_in_bytes': 832195361132962,
                'supported_organisms': 43 + 159,
                'experiments_processed': 35785 + 8661
            }
            return Response(result)
        result = {
            'samples_available': self._get_samples_available(),
            'total_size_in_bytes': OriginalFile.objects.aggregate(total_size=Sum('size_in_bytes'))['total_size'],
            'supported_organisms': self._get_supported_organisms(),
            'experiments_processed': self._get_experiments_processed()
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        # BUGFIX: this filter used `processed_samples_count__gt=1`, which
        # required at least *two* processed samples, contradicting the
        # "at least one" contract stated in the docstring.
        experiments_with_sample_processed = Experiment.objects\
            .annotate(
                processed_samples_count=Count('samples', filter=Q(samples__is_processed=True)),
            )\
            .filter(Q(processed_samples_count__gt=0))\
            .count()
        # Experiments whose only outputs are quant.sf files on unprocessed
        # samples also count as processed.
        experiments_with_sample_quant = ComputedFile.objects\
            .filter(filename='quant.sf', result__samples__is_processed=False)\
            .values_list('result__samples__experiments', flat=True)\
            .distinct()\
            .count()
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        # Organisms without a QN target still count when an unprocessed
        # RNA-seq sample of theirs has produced a quant.sf file.
        organisms_without_qn_targets = Organism.objects.filter(
            qn_target__isnull=True,
            sample__is_processed=False,
            sample__technology='RNA-SEQ',
            sample__results__computedfile__filename='quant.sf'
        ).distinct().count()
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = Sample.objects.filter(
            is_processed=False,
            technology='RNA-SEQ',
            results__computedfile__filename='quant.sf'
        ).distinct().count()
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """

    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        range_param = request.query_params.dict().pop('range', None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)

    @classmethod
    def calculate_stats(cls, range_param):
        """Build the full stats payload.

        When `range_param` ('day'/'week'/'month'/'year') is given, each
        section also includes a per-interval `timeline`.
        """
        data = {}
        data['survey_jobs'] = cls._get_job_stats(SurveyJob.objects, range_param)
        data['downloader_jobs'] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data['processor_jobs'] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data['experiments'] = cls._get_object_stats(Experiment.objects, range_param)
        # processed and unprocessed samples stats
        data['unprocessed_samples'] = cls._get_object_stats(Sample.objects.filter(is_processed=False), range_param, 'last_modified')
        data['processed_samples'] = cls._get_object_stats(Sample.processed_objects, range_param, 'last_modified')
        data['processed_samples']['last_hour'] = cls._samples_processed_last_hour()
        data['processed_samples']['technology'] = {}
        techs = Sample.processed_objects.values('technology').annotate(count=Count('technology'))
        for tech in techs:
            # skip blank / whitespace-only technology values
            if not tech['technology'] or not tech['technology'].strip():
                continue
            data['processed_samples']['technology'][tech['technology']] = tech['count']
        data['processed_samples']['organism'] = {}
        organisms = Sample.processed_objects.values('organism__name').annotate(count=Count('organism__name'))
        for organism in organisms:
            if not organism['organism__name']:
                continue
            data['processed_samples']['organism'][organism['organism__name']] = organism['count']
        data['processed_experiments'] = cls._get_object_stats(Experiment.processed_public_objects)
        data['active_volumes'] = list(get_active_volumes())
        data['dataset'] = cls._get_dataset_stats(range_param)
        if range_param:
            data['input_data_size'] = cls._get_input_data_size()
            data['output_data_size'] = cls._get_output_data_size()
        data.update(get_nomad_jobs_breakdown())
        return data

    # Email prefixes excluded from the dataset stats (mirrors the
    # engagement-bot filter in DatasetView).
    EMAIL_USERNAME_BLACKLIST = [
        'arielsvn',
        'cansav09',
        'd.prasad',
        'daniel.himmelstein',
        'dv.prasad991',
        'greenescientist',
        'jaclyn.n.taroni',
        'kurt.wheeler91',
        'michael.zietz',
        'miserlou'
    ]

    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        filter_query = Q()
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith='@alexslemonade.org')
        processed_datasets = Dataset.objects.filter(is_processed=True, email_address__isnull=False).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count('id'),
            aggregated_by_experiment=Count('id', filter=Q(aggregate_by='EXPERIMENT')),
            aggregated_by_species=Count('id', filter=Q(aggregate_by='SPECIES')),
            scale_by_none=Count('id', filter=Q(scale_by='NONE')),
            scale_by_minmax=Count('id', filter=Q(scale_by='MINMAX')),
            scale_by_standard=Count('id', filter=Q(scale_by='STANDARD')),
            scale_by_robust=Count('id', filter=Q(scale_by='ROBUST')),
        )
        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result['timeline'] = cls._get_intervals(
                processed_datasets,
                range_param,
                'last_modified'
            ).annotate(
                total=Count('id'),
                total_size=Sum('size_in_bytes')
            )
        return result

    @classmethod
    def _samples_processed_last_hour(cls):
        """Count samples whose `last_modified` falls within the past hour."""
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()

    @classmethod
    def _get_input_data_size(cls):
        """Total bytes of original files belonging to processed samples."""
        total_size = OriginalFile.objects.filter(
            sample__is_processed=True  # <-- SLOW
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_output_data_size(cls):
        """Total bytes of public computed files stored in S3."""
        # BUGFIX: this previously filtered on `s3_key__isnull=True`, i.e.
        # files that have a bucket but *no* key. Files actually uploaded to
        # S3 have both fields set, so the reported total was wrong.
        total_size = ComputedFile.public_objects.all().filter(
            s3_bucket__isnull=False,
            s3_key__isnull=False
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate total/successful/failed/pending/open counts and the
        average runtime for a job queryset, optionally windowed by range."""
        start_filter = Q()
        if range_param:
            current_date = datetime.now(tz=timezone.utc)
            start_date = {
                'day': current_date - timedelta(days=1),
                'week': current_date - timedelta(weeks=1),
                'month': current_date - timedelta(days=30),
                'year': current_date - timedelta(days=365)
            }.get(range_param)
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)
        result = jobs.filter(start_filter).aggregate(
            total=Count('id'),
            successful=Count('id', filter=Q(success=True)),
            failed=Count('id', filter=Q(success=False)),
            # pending/open ignore jobs created before JOB_CREATED_AT_CUTOFF
            pending=Count('id', filter=Q(start_time__isnull=True,
                                         success__isnull=True,
                                         created_at__gt=JOB_CREATED_AT_CUTOFF)),
            open=Count('id', filter=Q(start_time__isnull=False,
                                      success__isnull=True,
                                      created_at__gt=JOB_CREATED_AT_CUTOFF)),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result['average_time'] = jobs.filter(start_filter).filter(
            start_time__isnull=False,
            end_time__isnull=False,
            success=True
        ).aggregate(
            average_time=Avg(F('end_time') - F('start_time'))
        )['average_time']
        if not result['average_time']:
            result['average_time'] = 0
        else:
            result['average_time'] = result['average_time'].total_seconds()
        if range_param:
            result['timeline'] = cls._get_intervals(jobs, range_param) \
                .annotate(
                    total=Count('id'),
                    successful=Count('id', filter=Q(success=True)),
                    failed=Count('id', filter=Q(success=False)),
                    pending=Count('id', filter=Q(start_time__isnull=True, success__isnull=True)),
                    open=Count('id', filter=Q(start_time__isnull=False, success__isnull=True)),
                )
        return result

    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field='created_at'):
        """Total count for a queryset, plus a timeline when a range is given."""
        result = {
            'total': objects.count()
        }
        if range_param:
            result['timeline'] = cls._get_intervals(objects, range_param, field)\
                .annotate(total=Count('id'))
        return result

    @classmethod
    def _get_intervals(cls, objects, range_param, field='last_modified'):
        """Group `objects` into time buckets appropriate for `range_param`."""
        range_to_trunc = {
            'day': 'hour',
            'week': 'day',
            'month': 'day',
            'year': 'month'
        }
        current_date = datetime.now(tz=timezone.utc)
        range_to_start_date = {
            'day': current_date - timedelta(days=1),
            'week': current_date - timedelta(weeks=1),
            'month': current_date - timedelta(days=30),
            'year': current_date - timedelta(days=365)
        }
        # truncate the `last_modified` field by hour, day or month depending on the `range` param
        # and annotate each object with that. This will allow us to count the number of objects
        # on each interval with a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())) \
            .values('start') \
            .filter(start__gte=range_to_start_date.get(range_param))
###
# Transcriptome Indices
###
class TranscriptomeIndexList(generics.ListAPIView):
    """ List all Transcriptome Indices. These are a special type of process result, necessary for processing other SRA samples. """
    serializer_class = OrganismIndexSerializer

    def get_queryset(self):
        # One index per distinct (organism, index_type) pair.
        return OrganismIndex.objects.distinct("organism", "index_type")
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Organism name. Eg. `MUS_MUSCULUS`",
    ),
    openapi.Parameter(
        name='length', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="",
        enum=('short', 'long',),
        default='short'
    ),
]))
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """
    serializer_class = OrganismIndexSerializer

    def get_object(self):
        """Resolve the OrganismIndex for the URL's organism and the `length`
        query param ('short' by default).

        Raises Http404 when the organism or a matching index is missing.
        """
        organism_name = self.kwargs['organism_name'].upper()
        length = self.request.query_params.get('length', 'short')

        # Get the correct organism index object, serialize it, and return it
        transcription_length = "TRANSCRIPTOME_" + length.upper()
        try:
            # `organism_name` was already upper-cased above.
            organism = Organism.objects.get(name=organism_name)
            organism_index = OrganismIndex.objects.exclude(s3_url__exact="")\
                .distinct("organism", "index_type")\
                .get(organism=organism, index_type=transcription_length)
            return organism_index
        except (Organism.DoesNotExist, OrganismIndex.DoesNotExist):
            # Previously only OrganismIndex.DoesNotExist was caught, so an
            # unknown organism name produced a 500 instead of a 404.
            raise Http404('Organism does not exists')
###
# Compendia
###

class CompendiaDetail(APIView):
    """
    A very simple modified ComputedFile endpoint which only shows Compendia results.
    """

    @swagger_auto_schema(deprecated=True)
    def get(self, request, version, format=None):
        """Serialize all public compendia files, newest first; download urls
        are included only when a valid activated API token is supplied."""
        compendia = ComputedFile.objects.filter(
            is_compendia=True, is_public=True, is_qn_target=False
        ).order_by('-created_at')
        key = self.request.META.get('HTTP_API_KEY', None)
        try:
            APIToken.objects.get(id=key, is_activated=True)
            serializer = CompendiaWithUrlSerializer(compendia, many=True)
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            serializer = CompendiaSerializer(compendia, many=True)
        return Response(serializer.data)
###
# QN Targets
###

class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """
    serializer_class = OrganismSerializer
    # Unpaginated: the set of organisms with QN targets is small.
    paginator = None

    def get_queryset(self):
        return Organism.get_objects_with_qn_targets()
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
    )], responses={404: 'QN Target not found for the given organism.'}))
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """
    serializer_class = QNTargetSerializer

    def get_object(self):
        """Return the newest QN target computed file for the organism named in
        the URL, or raise 404 when none exists."""
        name = self.kwargs['organism_name'].upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(name).id
            latest_annotation = (
                ComputationalResultAnnotation.objects
                .filter(data__organism_id=organism_id, data__is_qn=True)
                .order_by('-created_at')
                .first()
            )
            qn_target = latest_annotation.result.computedfile_set.first()
        except Exception:
            # Covers unknown organisms as well as a missing annotation
            # (`latest_annotation` being None raises AttributeError above).
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##

class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list

    ComputedFiles are representation of files created by data-refinery processes.

    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """
    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = (
        'id',
        'is_qn_target',
        'is_smashable',
        'is_qc',
        'is_compendia',
        'quant_sf_only',
        'svd_algorithm',
        'compendia_version',
        'created_at',
        'last_modified',
    )
    ordering_fields = ('id', 'created_at', 'last_modified', 'compendia_version',)
    # Newest first by default.
    ordering = ('-id',)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        serializer_context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            # A valid activated token is put in the context so the serializer
            # can expose download urls.
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context
##
# Util
##

def get_client_ip(request):
    """Best-effort client IP: prefer the first hop of X-Forwarded-For,
    falling back to REMOTE_ADDR (or '' when neither is present)."""
    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded_for:
        # The header is a comma-separated proxy chain; the first entry
        # is the originating client.
        return forwarded_for.split(',')[0]
    return request.META.get('REMOTE_ADDR', '')
<|code_end|>
| api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.functions import Trunc
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_TERMS,
LOOKUP_FILTER_RANGE,
LOOKUP_FILTER_PREFIX,
LOOKUP_FILTER_WILDCARD,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
LOOKUP_QUERY_EXCLUDE,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend
)
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from elasticsearch_dsl import TermsFacet, DateHistogramFacet
from rest_framework import status, filters, generics, mixins
from rest_framework.exceptions import APIException, NotFound
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
SampleSerializer,
CompendiaSerializer,
CompendiaWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import (
ExperimentDocument
)
from data_refinery_common.utils import (
get_env_variable,
get_active_volumes,
get_nomad_jobs_breakdown,
)
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
logger = get_and_configure_logger(__name__)
##
# Variables
##
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
##
# ElasticSearch
##
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    """Facet backend that also reports unique-sample counts per bucket."""

    def aggregate(self, request, queryset, view):
        """Extends FacetedSearchFilterBackend to add additional metrics to each bucket
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19

        Downloadable sample accession codes are indexed for each experiment,
        so a cardinality metric on that field approximates the number of
        unique samples per bucket. The count is approximate:
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        40000 is the highest precision threshold Elasticsearch accepts, which
        may increase memory usage.
        """
        for name, facet_conf in self.construct_facets(request, view).items():
            aggregation = facet_conf['facet'].get_aggregation()
            bucket = queryset.aggs.bucket(name, aggregation)
            bucket.metric(
                'total_samples', 'cardinality',
                field='downloadable_samples', precision_threshold=40000,
            )
        return queryset
##
# ElasticSearch powered Search and Filter
##

@method_decorator(name='list', decorator=swagger_auto_schema(
    manual_parameters=[
        openapi.Parameter(
            name='technology', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
        ),
        openapi.Parameter(
            name='has_publication', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Filter the results that have associated publications with `?has_publication=true`",
        ),
        openapi.Parameter(
            name='platform', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by platform, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='organism', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by organism, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='num_processed_samples', in_=openapi.IN_QUERY,
            type=openapi.TYPE_NUMBER,
            description="Use ElasticSearch queries to specify the number of processed samples of the results",
        ),
    ],
    operation_description="""
Use this endpoint to search among the experiments.
This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)
There's an additional field in the response named `facets` that contain stats on the number of results per filter type.
Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
"""))
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """
    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended
    ]

    # Primitive
    lookup_field = 'id'

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        'title': {'boost': 10},
        'publication_authors': {'boost': 8}, # "People will search themselves"
        'publication_title': {'boost': 5},
        'submitter_institution': {'boost': 3},
        'description': {'boost': 2},
        'accession_code': None,
        'alternate_accession_code': None,
        'publication_doi': None,
        'pubmed_id': None,
        'sample_metadata_fields': None,
        'platform_names': None
    }

    # Define filtering fields
    filter_fields = {
        'id': {
            'field': '_id',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN
            ],
        },
        'technology': 'technology',
        'has_publication': 'has_publication',
        'platform': 'platform_accession_codes',
        'organism': 'organism_names',
        'num_processed_samples': {
            'field': 'num_processed_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        },
        'num_downloadable_samples': {
            'field': 'num_downloadable_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        }
    }

    # Define ordering fields
    ordering_fields = {
        'id': 'id',
        'title': 'title.raw',
        'description': 'description.raw',
        'num_total_samples': 'num_total_samples',
        'num_downloadable_samples': 'num_downloadable_samples',
        'source_first_published': 'source_first_published'
    }

    # Specify default ordering
    ordering = ('_score', '-num_total_samples', 'id', 'title', 'description', '-source_first_published')

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        'technology': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': True # These are enabled by default, which is more expensive but more simple.
        },
        'organism_names': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': True,
            'options': {
                'size': 999999
            }
        },
        'platform_accession_codes': {
            'field': 'platform_accession_codes',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
            'options': {
                'size': 999999
            }
        },
        'has_publication': {
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        'technology_global': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': False,
            'global': True
        },
        'organism_names_global': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'platform_names_global': {
            'field': 'platform_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'has_publication_global': {
            # Fixed copy-paste bug: this facet previously aggregated on
            # 'platform_names' instead of 'has_publication'.
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
        },
    }
    faceted_search_param = 'facet'

    def list(self, request, *args, **kwargs):
        """List results and rewrite the raw ES facets into a flat mapping."""
        # Unpack the varargs: the original passed `args`/`kwargs` as two
        # positional arguments, dumping the tuple and dict into the parent's
        # *args instead of forwarding them.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data['facets'] = self.transform_es_facets(response.data['facets'])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:

        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }

        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet['buckets']:
                # Boolean facets expose their key as "true"/"false" strings.
                if field == 'has_publication':
                    filter_group[bucket['key_as_string']] = bucket['total_samples']['value']
                else:
                    filter_group[bucket['key']] = bucket['total_samples']['value']
            result[field] = filter_group
        return result
##
# Dataset
##

class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Datasets. """
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
@method_decorator(name='get', decorator=swagger_auto_schema(operation_description="View a single Dataset.",manual_parameters=[
    openapi.Parameter(
        name='details', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
    )]))
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None)) # partial updates not supported
@method_decorator(name='put', decorator=swagger_auto_schema(operation_description="""
Modify an existing Dataset.
Set `start` to `true` along with a valid activated API token (from `/token/`) to begin smashing and delivery.
You must also supply `email_address` with `start`, though this will never be serialized back to you.
"""))
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = 'id'

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        """Exclude internal/test accounts from EngagementBot Slack posts.

        `email.find(x) != 0` means "does not start with x"; the
        alexslemonade check (`== -1`) means "does not contain".
        """
        return email is not None \
            and email.find("cansav09") != 0 \
            and email.find("arielsvn") != 0 \
            and email.find("jaclyn.n.taroni") != 0 \
            and email.find("kurt.wheeler") != 0 \
            and email.find("greenescientist") != 0 \
            and email.find("@alexslemonade.org") == -1 \
            and email.find("miserlou") != 0 \
            and email.find("michael.zietz@gmail.com") != 0 \
            and email.find("d.prasad") != 0 \
            and email.find("daniel.himmelstein@gmail.com") != 0 \
            and email.find("dv.prasad991@gmail.com") != 0

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            # A valid activated token lets the serializer expose download urls.
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        # Snapshot the pre-update state so we can restore the critical fields
        # if the dataset is already being processed.
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        qn_organisms = Organism.get_objects_with_qn_targets()
        # We convert 'ALL' into the actual accession codes given
        for key in new_data['data'].keys():
            accessions = new_data['data'][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(experiment.samples.filter(is_processed=True, organism__in=qn_organisms).values_list('accession_code', flat=True))
                new_data['data'][key] = sample_codes

        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")

        if new_data.get('start'):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get('token_id', None)
            if not token_id:
                token_id = self.request.META.get('HTTP_API_KEY', None)
            try:
                token = APIToken.objects.get(id=token_id, is_activated=True)
            except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")

            supplied_email_address = self.request.data.get('email_address', None)
            email_ccdl_ok = self.request.data.get('email_ccdl_ok', False)

            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                # Persist email preferences before dispatching so the smasher
                # job sees them.
                obj = serializer.save()
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get('no_send_job', False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException("Unable to queue download job. Something has gone"
                                       " wrong and we have been notified about it.")

                serializer.validated_data['is_processing'] = True
                obj = serializer.save()

                # Best-effort Slack notification; never fails the request.
                if settings.RUNNING_IN_CLOUD and settings.ENGAGEMENTBOT_WEBHOOK is not None \
                        and DatasetView._should_display_on_engagement_bot(supplied_email_address):
                    try:
                        try:
                            remote_ip = get_client_ip(self.request)
                            city = requests.get('https://ipapi.co/' + remote_ip + '/json/', timeout=10).json()['city']
                        except Exception:
                            city = "COULD_NOT_DETERMINE"
                        new_user_text = "New user " + supplied_email_address + " from " + city + " [" + remote_ip + "] downloaded a dataset! (" + str(old_object.id) + ")"
                        webhook_url = settings.ENGAGEMENTBOT_WEBHOOK
                        slack_json = {
                            "channel": "ccdl-general", # Move to robots when we get sick of these
                            "username": "EngagementBot",
                            "icon_emoji": ":halal:",
                            "attachments":[
                                { "color": "good",
                                  "text": new_user_text
                                }
                            ]
                        }
                        response = requests.post(
                            webhook_url,
                            json=slack_json,
                            headers={'Content-Type': 'application/json'},
                            timeout=10
                        )
                    except Exception as e:
                        # It doens't really matter if this didn't work
                        logger.error(e)
                        pass

                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data['data'] = old_data
            serializer.validated_data['aggregate_by'] = old_aggregate
        serializer.save()
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create

    There're several endpoints like [/dataset](#tag/dataset) and [/results](#tag/results) that return
    S3 urls where users can download the files we produce, however in order to get those files people
    need to accept our terms of use by creating a token and activating it.

    ```
    POST /token
    PUT /token/{token-id} is_active=True
    ```

    The token id needs to be sent on the `API_KEY` header on http requests.

    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """
    model = APIToken
    serializer_class = APITokenSerializer
# PATCH is hidden from the schema; activation happens via PUT.
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify Api Tokens.

    get:
    Return details about a specific token.

    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """
    model = APIToken
    lookup_field = 'id'
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##

class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """
    model = Experiment
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    filterset_fields = (
        'title',
        'description',
        'accession_code',
        'alternate_accession_code',
        'source_database',
        'source_url',
        'has_publication',
        'publication_title',
        'publication_doi',
        'pubmed_id',
        'organisms',
        'submitter_institution',
        'created_at',
        'last_modified',
        'source_first_published',
        'source_last_modified',
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given its accession code """
    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##

@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='dataset_id', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns samples that are added to a dataset.",
    ),
    openapi.Parameter(
        name='experiment_accession_code', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns only the samples associated with an experiment accession code.",
    ),
    openapi.Parameter(
        name='accession_codes', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
    ),
]))
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """
    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = '__all__'
    # Fixed: this was the bare string ('-is_processed') — the parentheses
    # alone do not make a tuple. DRF tolerates a plain string for `ordering`,
    # but a one-element tuple matches every other list view in this file.
    ordering = ('-is_processed',)
    filterset_fields = (
        'title',
        'organism',
        'source_database',
        'source_archive_url',
        'has_raw',
        'platform_name',
        'technology',
        'manufacturer',
        'sex',
        'age',
        'specimen_part',
        'genotype',
        'disease',
        'disease_stage',
        'cell_line',
        'treatment',
        'race',
        'subject',
        'compound',
        'time',
        'is_processed',
        'is_public'
    )

    def get_queryset(self):
        """
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        queryset = Sample.public_objects \
            .prefetch_related('organism') \
            .prefetch_related('results') \
            .prefetch_related('results__processor') \
            .prefetch_related('results__computationalresultannotation_set') \
            .prefetch_related('results__computedfile_set') \
            .filter(**self.get_query_params_filters())

        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get('filter_by', None)
        if filter_by:
            queryset = queryset.filter(Q(accession_code__icontains=filter_by) |
                                       Q(title__icontains=filter_by) |
                                       Q(sex__icontains=filter_by) |
                                       Q(age__icontains=filter_by) |
                                       Q(specimen_part__icontains=filter_by) |
                                       Q(genotype__icontains=filter_by) |
                                       Q(disease__icontains=filter_by) |
                                       Q(disease_stage__icontains=filter_by) |
                                       Q(cell_line__icontains=filter_by) |
                                       Q(treatment__icontains=filter_by) |
                                       Q(race__icontains=filter_by) |
                                       Q(subject__icontains=filter_by) |
                                       Q(compound__icontains=filter_by) |
                                       Q(time__icontains=filter_by))

        return queryset

    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()

        ids = self.request.query_params.get('ids', None)
        if ids is not None:
            ids = [int(x) for x in ids.split(',')]
            filter_dict['pk__in'] = ids

        experiment_accession_code = self.request.query_params.get('experiment_accession_code', None)
        if experiment_accession_code:
            experiment = get_object_or_404(Experiment.objects.values('id'), accession_code=experiment_accession_code)
            filter_dict['experiments__in'] = [experiment['id']]

        accession_codes = self.request.query_params.get('accession_codes', None)
        if accession_codes:
            accession_codes = accession_codes.split(',')
            filter_dict['accession_code__in'] = accession_codes

        dataset_id = self.request.query_params.get('dataset_id', None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            filter_dict['accession_code__in'] = [item for sublist in dataset.data.values() for item in sublist]

        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get('organism__name', None)
        if organism_name:
            filter_dict['organism__name'] = organism_name

        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given its accession code """
    lookup_field = "accession_code"
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##

class ProcessorList(generics.ListAPIView):
    """List all processors."""
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##

class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list

    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).

    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """
    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        # With a valid activated token, the serializer exposes S3 urls.
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return ComputationalResultWithUrlSerializer
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer

    def filter_queryset(self, queryset):
        # NOTE(review): query params are passed straight into .filter(), so an
        # unknown field name raises FieldError (HTTP 500) — consider
        # whitelisting filterable fields.
        filter_dict = self.request.query_params.dict()
        filter_dict.pop('limit', None)
        filter_dict.pop('offset', None)
        return queryset.filter(**filter_dict)
##
# Search Filter Models
##

class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """
    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    paginator = None
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """
    serializer_class = PlatformSerializer
    paginator = None

    def get_queryset(self):
        # Distinct (accession code, name) pairs across all public samples.
        return Sample.public_objects.all().values("platform_accession_code", "platform_name").distinct()
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """
    serializer_class = InstitutionSerializer
    paginator = None

    def get_queryset(self):
        # Distinct submitter institutions across all public experiments.
        return Experiment.public_objects.all().values("submitter_institution").distinct()
##
# Jobs
##

class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """
    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """
    model = DownloaderJob
    queryset = DownloaderJob.objects.all()
    serializer_class = DownloaderJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable.
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """
    model = ProcessorJob
    queryset = ProcessorJob.objects.all()
    serializer_class = ProcessorJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable (includes nomad_job_id, so jobs
    # can be located from a Nomad job ID).
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)
###
# Statistics
###

class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """

    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Return site-wide totals; cached for ten minutes."""
        # static values for now
        dummy = request.query_params.dict().pop('dummy', None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                'samples_available': 904953 + 391022,
                'total_size_in_bytes': 832195361132962,
                'supported_organisms': 43 + 159,
                'experiments_processed': 35785 + 8661
            }
            return Response(result)

        result = {
            'samples_available': self._get_samples_available(),
            'total_size_in_bytes': OriginalFile.objects.aggregate(total_size=Sum('size_in_bytes'))['total_size'],
            'supported_organisms': self._get_supported_organisms(),
            'experiments_processed': self._get_experiments_processed()
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        # Fixed: this previously filtered on __gt=1, which required at least
        # TWO processed samples, contradicting the docstring.
        experiments_with_sample_processed = Experiment.objects\
            .annotate(
                processed_samples_count=Count('samples', filter=Q(samples__is_processed=True)),
            )\
            .filter(Q(processed_samples_count__gt=0))\
            .count()
        # Experiments whose only processed output is a quant.sf file.
        experiments_with_sample_quant = ComputedFile.objects\
            .filter(filename='quant.sf', result__samples__is_processed=False)\
            .values_list('result__samples__experiments', flat=True)\
            .distinct()\
            .count()
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        organisms_without_qn_targets = Organism.objects.filter(
            qn_target__isnull=True,
            sample__is_processed=False,
            sample__technology='RNA-SEQ',
            sample__results__computedfile__filename='quant.sf'
        ).distinct().count()
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = Sample.objects.filter(
            is_processed=False,
            technology='RNA-SEQ',
            results__computedfile__filename='quant.sf'
        ).distinct().count()
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """
    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Return system-wide statistics; cached for 10 minutes because the queries are heavy."""
        range_param = request.query_params.dict().pop('range', None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)
    @classmethod
    def calculate_stats(cls, range_param):
        """Build the full stats payload (jobs, samples, experiments, datasets, Nomad).

        When `range_param` is 'day'/'week'/'month'/'year', each section also gets
        a `timeline` breakdown and the input/output data-size totals are included.
        """
        data = {}
        data['survey_jobs'] = cls._get_job_stats(SurveyJob.objects, range_param)
        data['downloader_jobs'] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data['processor_jobs'] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data['experiments'] = cls._get_object_stats(Experiment.objects, range_param)
        # processed and unprocessed samples stats
        data['unprocessed_samples'] = cls._get_object_stats(Sample.objects.filter(is_processed=False), range_param, 'last_modified')
        data['processed_samples'] = cls._get_object_stats(Sample.processed_objects, range_param, 'last_modified')
        data['processed_samples']['last_hour'] = cls._samples_processed_last_hour()
        # Per-technology counts of processed samples; blank technologies are skipped.
        data['processed_samples']['technology'] = {}
        techs = Sample.processed_objects.values('technology').annotate(count=Count('technology'))
        for tech in techs:
            if not tech['technology'] or not tech['technology'].strip():
                continue
            data['processed_samples']['technology'][tech['technology']] = tech['count']
        # Per-organism counts of processed samples; samples with no organism are skipped.
        data['processed_samples']['organism'] = {}
        organisms = Sample.processed_objects.values('organism__name').annotate(count=Count('organism__name'))
        for organism in organisms:
            if not organism['organism__name']:
                continue
            data['processed_samples']['organism'][organism['organism__name']] = organism['count']
        data['processed_experiments'] = cls._get_object_stats(Experiment.processed_public_objects)
        data['active_volumes'] = list(get_active_volumes())
        data['dataset'] = cls._get_dataset_stats(range_param)
        if range_param:
            # These two aggregates are slow, so they are only computed on demand.
            data['input_data_size'] = cls._get_input_data_size()
            data['output_data_size'] = cls._get_output_data_size()
        data.update(get_nomad_jobs_breakdown())
        return data
    # Email usernames whose datasets are excluded from dataset stats
    # (see _get_dataset_stats).
    EMAIL_USERNAME_BLACKLIST = [
        'arielsvn',
        'cansav09',
        'd.prasad',
        'daniel.himmelstein',
        'dv.prasad991',
        'greenescientist',
        'jaclyn.n.taroni',
        'kurt.wheeler91',
        'michael.zietz',
        'miserlou'
    ]
    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        # Exclude datasets whose email starts with a blacklisted username or
        # ends with @alexslemonade.org.
        filter_query = Q()
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith='@alexslemonade.org')
        processed_datasets = Dataset.objects.filter(is_processed=True, email_address__isnull=False).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count('id'),
            aggregated_by_experiment=Count('id', filter=Q(aggregate_by='EXPERIMENT')),
            aggregated_by_species=Count('id', filter=Q(aggregate_by='SPECIES')),
            scale_by_none=Count('id', filter=Q(scale_by='NONE')),
            scale_by_minmax=Count('id', filter=Q(scale_by='MINMAX')),
            scale_by_standard=Count('id', filter=Q(scale_by='STANDARD')),
            scale_by_robust=Count('id', filter=Q(scale_by='ROBUST')),
        )
        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result['timeline'] = cls._get_intervals(
                processed_datasets,
                range_param,
                'last_modified'
            ).annotate(
                total=Count('id'),
                total_size=Sum('size_in_bytes')
            )
        return result
    @classmethod
    def _samples_processed_last_hour(cls):
        """Count processed samples whose `last_modified` falls within the past hour."""
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()
    @classmethod
    def _get_input_data_size(cls):
        """Total size in bytes of original files belonging to processed samples."""
        total_size = OriginalFile.objects.filter(
            sample__is_processed=True # <-- SLOW
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0
    @classmethod
    def _get_output_data_size(cls):
        """Total size in bytes of public computed files stored in S3."""
        # NOTE(review): `s3_key__isnull=True` combined with `s3_bucket__isnull=False`
        # looks inverted -- a file stored on S3 should have both a bucket AND a key.
        # Presumably `s3_key__isnull=False` was intended; confirm before changing.
        total_size = ComputedFile.public_objects.all().filter(
            s3_bucket__isnull=False,
            s3_key__isnull=True
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0
    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate total/successful/failed/pending/open counts and average runtime."""
        start_filter = Q()
        if range_param:
            current_date = datetime.now(tz=timezone.utc)
            start_date = {
                'day': current_date - timedelta(days=1),
                'week': current_date - timedelta(weeks=1),
                'month': current_date - timedelta(days=30),
                'year': current_date - timedelta(days=365)
            }.get(range_param)
            # Jobs that never started have no start_time; keep them explicitly.
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)
        result = jobs.filter(start_filter).aggregate(
            total=Count('id'),
            successful=Count('id', filter=Q(success=True)),
            failed=Count('id', filter=Q(success=False)),
            pending=Count('id', filter=Q(start_time__isnull=True,
                                         success__isnull=True,
                                         created_at__gt=JOB_CREATED_AT_CUTOFF)),
            open=Count('id', filter=Q(start_time__isnull=False,
                                      success__isnull=True,
                                      created_at__gt=JOB_CREATED_AT_CUTOFF)),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result['average_time'] = jobs.filter(start_filter).filter(
            start_time__isnull=False,
            end_time__isnull=False,
            success=True
        ).aggregate(
            average_time=Avg(F('end_time') - F('start_time'))
        )['average_time']
        # Avg returns a timedelta (or None); normalize to seconds for the API.
        if not result['average_time']:
            result['average_time'] = 0
        else:
            result['average_time'] = result['average_time'].total_seconds()
        if range_param:
            result['timeline'] = cls._get_intervals(jobs, range_param) \
                .annotate(
                    total=Count('id'),
                    successful=Count('id', filter=Q(success=True)),
                    failed=Count('id', filter=Q(success=False)),
                    pending=Count('id', filter=Q(start_time__isnull=True, success__isnull=True)),
                    open=Count('id', filter=Q(start_time__isnull=False, success__isnull=True)),
                )
        return result
    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field='created_at'):
        """Return the total count of `objects`, plus a `timeline` when a range is given."""
        result = {
            'total': objects.count()
        }
        if range_param:
            result['timeline'] = cls._get_intervals(objects, range_param, field)\
                .annotate(total=Count('id'))
        return result
    @classmethod
    def _get_intervals(cls, objects, range_param, field='last_modified'):
        """Bucket `objects` into time intervals sized appropriately for `range_param`."""
        # Bucket width for each supported range.
        range_to_trunc = {
            'day': 'hour',
            'week': 'day',
            'month': 'day',
            'year': 'month'
        }
        current_date = datetime.now(tz=timezone.utc)
        # Earliest timestamp included for each supported range.
        range_to_start_date = {
            'day': current_date - timedelta(days=1),
            'week': current_date - timedelta(weeks=1),
            'month': current_date - timedelta(days=30),
            'year': current_date - timedelta(days=365)
        }
        # truncate the `last_modified` field by hour, day or month depending on the `range` param
        # and annotate each object with that. This will allow us to count the number of objects
        # on each interval with a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())) \
            .values('start') \
            .filter(start__gte=range_to_start_date.get(range_param))
###
# Transcriptome Indices
###
class TranscriptomeIndexList(generics.ListAPIView):
    """ List all Transcriptome Indices. These are a special type of process result, necessary for processing other SRA samples. """
    serializer_class = OrganismIndexSerializer
    def get_queryset(self):
        # Collapse duplicates: at most one index per (organism, index_type) pair.
        return OrganismIndex.objects.distinct("organism", "index_type")
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Organism name. Eg. `MUS_MUSCULUS`",
    ),
    openapi.Parameter(
        name='length', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="",
        enum=('short', 'long',),
        default='short'
    ),
]))
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """
    serializer_class = OrganismIndexSerializer
    def get_object(self):
        """Look up the transcriptome index for the requested organism and length.

        Raises Http404 when the organism is unknown or no matching index with
        an S3 url exists.
        """
        organism_name = self.kwargs['organism_name'].upper()
        length = self.request.query_params.get('length', 'short')
        # Get the correct organism index object, serialize it, and return it
        transcription_length = "TRANSCRIPTOME_" + length.upper()
        try:
            organism = Organism.objects.get(name=organism_name.upper())
            organism_index = OrganismIndex.objects.exclude(s3_url__exact="")\
                .distinct("organism", "index_type")\
                .get(organism=organism, index_type=transcription_length)
            return organism_index
        # FIX: previously only OrganismIndex.DoesNotExist was caught, so an
        # unknown organism name produced a 500 instead of a 404.
        except (Organism.DoesNotExist, OrganismIndex.DoesNotExist):
            raise Http404('Organism does not exist')
###
# Compendia
###
class CompendiaDetail(APIView):
    """
    A very simple modified ComputedFile endpoint which only shows Compendia results.
    """
    @swagger_auto_schema(deprecated=True)
    def get(self, request, version, format=None):
        """List public compendia files, newest first; a valid API key unlocks URLs."""
        compendia = ComputedFile.objects.filter(
            is_compendia=True, is_public=True, is_qn_target=False
        ).order_by('-created_at')
        api_key = self.request.META.get('HTTP_API_KEY', None)
        try:
            # An activated token entitles the caller to download URLs.
            APIToken.objects.get(id=api_key, is_activated=True)
        except Exception:  # APIToken.DoesNotExist or ValidationError on a malformed key
            serializer = CompendiaSerializer(compendia, many=True)
        else:
            serializer = CompendiaWithUrlSerializer(compendia, many=True)
        return Response(serializer.data)
###
# QN Targets
###
class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """
    serializer_class = OrganismSerializer
    # Pagination disabled: the full organism list is returned in one response.
    paginator = None
    def get_queryset(self):
        return Organism.get_objects_with_qn_targets()
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
    )], responses={404: 'QN Target not found for the given organism.'}))
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """
    serializer_class = QNTargetSerializer
    def get_object(self):
        """Return the newest QN-target computed file for the organism in the URL."""
        # Normalize e.g. "homo sapiens" to "HOMO_SAPIENS".
        name = self.kwargs['organism_name'].upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(name).id
            newest_annotation = ComputationalResultAnnotation.objects.filter(
                data__organism_id=organism_id,
                data__is_qn=True
            ).order_by(
                '-created_at'
            ).first()
            # newest_annotation may be None; the resulting AttributeError is
            # intentionally caught below and converted to a 404.
            qn_target = newest_annotation.result.computedfile_set.first()
        except Exception:
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##
class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list

    ComputedFiles are representation of files created by data-refinery processes.

    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """
    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Fields clients may filter on via query parameters.
    filterset_fields = (
        'id',
        'is_qn_target',
        'is_smashable',
        'is_qc',
        'is_compendia',
        'quant_sf_only',
        'svd_algorithm',
        'compendia_version',
        'created_at',
        'last_modified',
    )
    ordering_fields = ('id', 'created_at', 'last_modified', 'compendia_version',)
    # Newest files first by default.
    ordering = ('-id',)
    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        serializer_context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        # A valid, activated token is passed to the serializer so it can
        # include download URLs; otherwise the default context is returned.
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context
##
# Util
##
def get_client_ip(request):
    """Return the originating client IP for the given request.

    Prefers the first entry of the X-Forwarded-For header (set by proxies /
    load balancers); falls back to REMOTE_ADDR, or '' when neither is present.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if not forwarded:
        return request.META.get('REMOTE_ADDR', '')
    # X-Forwarded-For is "client, proxy1, proxy2, ..." -- the client comes first.
    return forwarded.split(',')[0]
<|code_end|>
|
Create command to delete invalid compendia
### Context
https://github.com/AlexsLemonade/refinebio/pull/1734
Compendia need a QN target so they can be quantile normalized. We need 100 samples on the same platform to create a QN target. Therefore any organism without 100 samples on the same platform should be ineligible for a compendium.
### Problem or idea
We created a compendium for Bos Indicus, which you can see only has 57 samples: https://www.refine.bio/search?filter_order=organism&organism=bos_indicus
### Solution or next step
In https://github.com/AlexsLemonade/refinebio/pull/1734 I should have prevented us from trying to create a compendium for this situation.
However, we should still:
* Look into how the compendium was created successfully despite not having a QN target for the quantile normalization step.
* Delete this and any other compendia that should have failed to be created in the first place.
| common/data_refinery_common/models/models.py
<|code_start|>import hashlib
import io
import os
import shutil
import pytz
import uuid
import boto3
from botocore.client import Config
from datetime import datetime
from functools import partial
from typing import Dict, Set
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.expressions import F, Q
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import transaction
from django.db import models
from django.utils import timezone
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.organism import Organism
from data_refinery_common.utils import get_env_variable, get_s3_url, calculate_file_size, calculate_sha1, FileUtils
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client('s3', config=Config(signature_version='s3v4'))
logger = get_and_configure_logger(__name__)
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = 'salmon ' + get_env_variable("SALMON_VERSION", "0.13.1")
CHUNK_SIZE = 1024 * 256 # chunk_size is in bytes
"""
# First Order Classes
This represent the primary data types we will be querying
and filtering against.
"""
class PublicObjectsManager(models.Manager):
    """
    Only returns objects that have is_public
    """
    def get_queryset(self):
        # Narrow the default queryset down to publicly visible rows.
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_public=True)
class ProcessedObjectsManager(models.Manager):
    """
    Only returns objects that have is_processed and is_public
    """
    def get_queryset(self):
        # Both flags must be set: processed AND publicly visible.
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_processed=True, is_public=True)
class Sample(models.Model):
    """
    An individual sample.
    """
    class Meta:
        db_table = "samples"
        base_manager_name = "public_objects"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=['accession_code']),
        ]
    def __str__(self):
        return self.accession_code
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_objects = ProcessedObjectsManager()
    # Identifiers
    accession_code = models.CharField(max_length=255, unique=True)
    title = models.CharField(max_length=255, unique=False, blank=True)
    # Relations
    organism = models.ForeignKey(Organism, blank=True, null=True, on_delete=models.SET_NULL)
    results = models.ManyToManyField('ComputationalResult', through='SampleResultAssociation')
    original_files = models.ManyToManyField('OriginalFile', through='OriginalFileSampleAssociation')
    computed_files = models.ManyToManyField('ComputedFile', through='SampleComputedFileAssociation')
    experiments = models.ManyToManyField('Experiment', through='ExperimentSampleAssociation')
    # Historical Properties
    source_database = models.CharField(max_length=255, blank=False)
    source_archive_url = models.CharField(max_length=255)
    source_filename = models.CharField(max_length=255, blank=False)
    source_absolute_file_path = models.CharField(max_length=255)
    has_raw = models.BooleanField(default=True) # Did this sample have a raw data source?
    # Technological Properties
    platform_accession_code = models.CharField(max_length=256, blank=True)
    platform_name = models.CharField(max_length=256, blank=True)
    technology = models.CharField(max_length=256, blank=True) # MICROARRAY, RNA-SEQ
    manufacturer = models.CharField(max_length=256, blank=True)
    protocol_info = JSONField(default=dict)
    # Scientific Properties
    sex = models.CharField(max_length=255, blank=True)
    age = models.DecimalField(max_length=255, blank=True, max_digits=8, decimal_places=3, null=True)
    specimen_part = models.CharField(max_length=255, blank=True)
    genotype = models.CharField(max_length=255, blank=True)
    disease = models.CharField(max_length=255, blank=True)
    disease_stage = models.CharField(max_length=255, blank=True)
    cell_line = models.CharField(max_length=255, blank=True)
    treatment = models.CharField(max_length=255, blank=True)
    race = models.CharField(max_length=255, blank=True)
    subject = models.CharField(max_length=255, blank=True)
    compound = models.CharField(max_length=255, blank=True)
    time = models.CharField(max_length=255, blank=True)
    # Crunch Properties
    is_processed = models.BooleanField(default=False)
    # Blacklisting
    is_blacklisted = models.BooleanField(default=False)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # Only set created_at on first save (no primary key yet).
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(Sample, self).save(*args, **kwargs)
    def to_metadata_dict(self):
        """Render this Sample as a dict of `refinebio_`-prefixed metadata fields."""
        metadata = {}
        metadata['refinebio_title'] = self.title
        metadata['refinebio_accession_code'] = self.accession_code
        metadata['refinebio_organism'] = self.organism.name if self.organism else None
        metadata['refinebio_source_database'] = self.source_database
        metadata['refinebio_source_archive_url'] = self.source_archive_url
        metadata['refinebio_sex'] = self.sex
        metadata['refinebio_age'] = self.age or ''
        metadata['refinebio_specimen_part'] = self.specimen_part
        metadata['refinebio_genetic_information'] = self.genotype
        metadata['refinebio_disease'] = self.disease
        metadata['refinebio_disease_stage'] = self.disease_stage
        metadata['refinebio_cell_line'] = self.cell_line
        metadata['refinebio_treatment'] = self.treatment
        metadata['refinebio_race'] = self.race
        metadata['refinebio_subject'] = self.subject
        metadata['refinebio_compound'] = self.compound
        metadata['refinebio_time'] = self.time
        metadata['refinebio_platform'] = self.pretty_platform
        metadata['refinebio_annotations'] = [
            data for data in self.sampleannotation_set.all().values_list('data', flat=True)
        ]
        return metadata
    # Returns a set of ProcessorJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_processor_jobs(self) -> Set:
        """Collect every ProcessorJob reachable through this sample's original files."""
        processor_jobs = set()
        for original_file in self.original_files.prefetch_related("processor_jobs").all():
            for processor_job in original_file.processor_jobs.all():
                processor_jobs.add(processor_job)
        return processor_jobs
    # Returns a set of DownloaderJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_downloader_jobs(self) -> Set:
        """Collect every DownloaderJob reachable through this sample's original files."""
        downloader_jobs = set()
        for original_file in self.original_files.prefetch_related("downloader_jobs").all():
            for downloader_job in original_file.downloader_jobs.all():
                downloader_jobs.add(downloader_job)
        return downloader_jobs
    def get_result_files(self):
        """ Get all of the ComputedFile objects associated with this Sample """
        return self.computed_files.all()
    def get_most_recent_smashable_result_file(self):
        """ Get the most recent of the ComputedFile objects associated with this Sample """
        try:
            latest_computed_file = self.computed_files.filter(
                is_public=True,
                is_smashable=True,
            ).latest()
            # Access a property to make the query fire now.
            latest_computed_file.id
            return latest_computed_file
        except Exception as e:
            # This sample has no smashable files yet.
            # (DoesNotExist from .latest(); the broad except also swallows
            # anything else -- NOTE(review): consider narrowing.)
            return None
    def get_most_recent_quant_sf_file(self):
        """ Returns the latest quant.sf file that was generated for this sample.

        Note: We don't associate that file to the computed_files of this sample, that's
        why we have to go through the computational results. """
        return ComputedFile.objects\
            .filter(result__in=self.results.all(), filename='quant.sf')\
            .order_by('-created_at')\
            .first()
    @property
    def pretty_platform(self):
        """ Turns

        [HT_HG-U133_Plus_PM] Affymetrix HT HG-U133+ PM Array Plate

        into

        Affymetrix HT HG-U133+ PM Array Plate (hthgu133pluspm)
        """
        # Strip the leading "[ACCESSION]" prefix when present.
        if ']' in self.platform_name:
            platform_base = self.platform_name.split(']')[1].strip()
        else:
            platform_base = self.platform_name
        return platform_base + ' (' + self.platform_accession_code + ')'
class SampleAnnotation(models.Model):
    """ Semi-standard information associated with a Sample """
    class Meta:
        db_table = "sample_annotations"
        base_manager_name = 'public_objects'
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # Relations
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    # Properties
    # Arbitrary JSON annotation payload.
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=False)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # Only set created_at on first save (no primary key yet).
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(SampleAnnotation, self).save(*args, **kwargs)
class ProcessedPublicObjectsManager(models.Manager):
    """
    Only returns Experiments that are is_public and have related is_processed Samples.
    """
    def get_queryset(self):
        # num_processed_samples is a cached counter maintained on the model.
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_public=True, num_processed_samples__gt=0)
class Experiment(models.Model):
    """ An Experiment or Study """
    class Meta:
        db_table = "experiments"
        base_manager_name = 'public_objects'
    def __str__(self):
        return "Experiment: " + self.accession_code
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()
    # Relations
    samples = models.ManyToManyField('Sample', through='ExperimentSampleAssociation')
    organisms = models.ManyToManyField('Organism', through='ExperimentOrganismAssociation')
    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)
    # Historical Properties
    source_database = models.CharField(max_length=32) # "ArrayExpress, "SRA", "GEO"
    source_url = models.TextField()
    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)
    # Cached Computed Properties
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    organism_names = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # Only set created_at on first save (no primary key yet).
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        # Derive the alternate accession for the GEO <-> ArrayExpress mapping
        # (GSE12345 <-> E-GEOD-12345) when it hasn't been set explicitly.
        if self.accession_code and not self.alternate_accession_code:
            if self.accession_code.startswith('GSE'):
                self.alternate_accession_code = 'E-GEOD-' + self.accession_code[3:]
            elif self.accession_code.startswith('E-GEOD-'):
                self.alternate_accession_code = 'GSE' + self.accession_code[7:]
        return super(Experiment, self).save(*args, **kwargs)
    def update_num_samples(self):
        """ Update our cache values """
        # One query computes all three cached counters via conditional aggregation.
        aggregates = self.samples.aggregate(
            num_total_samples=Count('id'),
            num_processed_samples=Count('id', filter=Q(is_processed=True)),
            num_downloadable_samples=Count('id', filter=Q(is_processed=True, organism__qn_target__isnull=False))
        )
        self.num_total_samples = aggregates['num_total_samples']
        self.num_processed_samples = aggregates['num_processed_samples']
        self.num_downloadable_samples = aggregates['num_downloadable_samples']
        self.save()
    def to_metadata_dict(self):
        """ Render this Experiment as a dict """
        metadata = {}
        metadata['title'] = self.title
        metadata['accession_code'] = self.accession_code
        metadata['organisms'] = [organism.name for organism in self.organisms.all()]
        metadata['description'] = self.description
        metadata['protocol_description'] = self.protocol_description
        metadata['technology'] = self.technology
        metadata['submitter_institution'] = self.submitter_institution
        metadata['has_publication'] = self.has_publication
        metadata['publication_title'] = self.publication_title
        metadata['publication_doi'] = self.publication_doi
        metadata['pubmed_id'] = self.pubmed_id
        if self.source_first_published:
            metadata['source_first_published'] = self.source_first_published.strftime(
                '%Y-%m-%dT%H:%M:%S')
        else:
            metadata['source_first_published'] = ''
        if self.source_last_modified:
            metadata['source_last_modified'] = self.source_last_modified.strftime(
                '%Y-%m-%dT%H:%M:%S')
        else:
            metadata['source_last_modified'] = ''
        return metadata
    def get_sample_metadata_fields(self):
        """ Get all metadata fields that are non-empty for at least one sample in the experiment.

        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        fields = []
        possible_fields = ['sex', 'age', 'specimen_part', 'genotype', 'disease', 'disease_stage',
                           'cell_line', 'treatment', 'race', 'subject', 'compound', 'time']
        samples = self.samples.all()
        for field in possible_fields:
            for sample in samples:
                if getattr(sample, field) != None and getattr(sample, field) != '':
                    fields.append(field)
                    break
        return fields
    def update_sample_metadata_fields(self):
        self.sample_metadata_fields = self.get_sample_metadata_fields()
    def update_organism_names(self):
        self.organism_names = self.get_organism_names()
    def update_platform_names(self):
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()
    def get_sample_technologies(self):
        """ Get a list of unique technologies for all of the associated samples
        """
        return list(set([sample.technology for sample in self.samples.all()]))
    def get_organism_names(self):
        """ Get a list of unique organism names for all of the associated organisms
        """
        return list(set([organism.name for organism in self.organisms.all()]))
    def get_platform_names(self):
        """ Get a list of unique platforms for all of the associated samples
        """
        return list(set([sample.platform_name for sample in self.samples.all()]))
    def get_platform_accession_codes(self):
        """ Get a list of unique platform accession codes for all of the associated samples
        """
        return list(set([sample.platform_accession_code for sample in self.samples.all()]))
    @property
    def platforms(self):
        """ Returns a list of related pipelines """
        return list(set([sample.platform_name for sample in self.samples.all()]))
    @property
    def pretty_platforms(self):
        """ Returns a prettified list of related pipelines """
        return list(set([sample.pretty_platform for sample in self.samples.all()]))
    @property
    def processed_samples(self):
        """Accession codes of this experiment's processed samples."""
        return list([sample.accession_code for sample in self.samples.all() if sample.is_processed == True])
    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.

        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(self.samples.filter(is_processed=True, organism__qn_target__isnull=False)\
            .values_list('accession_code', flat=True))
class ExperimentAnnotation(models.Model):
    """ Semi-standard information associated with an Experiment """
    class Meta:
        db_table = "experiment_annotations"
        base_manager_name = 'public_objects'
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # Relations
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    # Properties
    # Arbitrary JSON annotation payload.
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=False)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # Only set created_at on first save (no primary key yet).
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ExperimentAnnotation, self).save(*args, **kwargs)
class Pipeline(models.Model):
    """Pipeline that is associated with a series of ComputationalResult records."""
    name = models.CharField(max_length=255)
    # Integer identifiers of the steps this pipeline consists of.
    steps = ArrayField(models.IntegerField(), default=list)
    class Meta:
        db_table = "pipelines"
class Processor(models.Model):
    """Processor associated with a certain ComputationalResult."""
    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64)
    docker_image = models.CharField(max_length=255)
    environment = JSONField(default=dict)
    class Meta:
        db_table = "processors"
        # A processor is identified by the full combination below -- the same
        # name may exist at many versions/images/environments.
        unique_together = ('name', 'version', 'docker_image', 'environment')
    def __str__(self):
        return "Processor: %s (version: %s, docker_image: %s)" % (self.name, self.version, self.docker_image)
class ComputationalResult(models.Model):
    """ Meta-information about the output of a computer process. (Ex Salmon) """
    class Meta:
        db_table = "computational_results"
        base_manager_name = 'public_objects'
    def __str__(self):
        processor_name_str = ""
        if self.processor:
            processor_name_str = ": " + str(self.processor.name)
        return "ComputationalResult " + str(self.pk) + processor_name_str
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # Command lines that produced this result.
    commands = ArrayField(models.TextField(), default=list)
    processor = models.ForeignKey(Processor, blank=True, null=True, on_delete=models.CASCADE)
    samples = models.ManyToManyField('Sample', through='SampleResultAssociation')
    # The Organism Index used to process the sample.
    organism_index = models.ForeignKey('OrganismIndex', blank=True, null=True, on_delete=models.SET_NULL)
    is_ccdl = models.BooleanField(default=True)
    # Stats
    time_start = models.DateTimeField(blank=True, null=True)
    time_end = models.DateTimeField(blank=True, null=True)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # Only set created_at on first save (no primary key yet).
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResult, self).save(*args, **kwargs)
class ComputationalResultAnnotation(models.Model):
    """ Non-standard information associated with an ComputationalResult """
    class Meta:
        db_table = "computational_result_annotations"
        base_manager_name = 'public_objects'

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # only rows with is_public=True

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    # Free-form annotation payload; schema is not enforced here.
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResultAnnotation, self).save(*args, **kwargs)
# TODO
# class Gene(models.Model):
#     """ A representation of a Gene """
#     class Meta:
#         db_table = "genes"
class OrganismIndex(models.Model):
    """ A special type of process result, necessary for processing other SRA samples """
    class Meta:
        db_table = "organism_index"
        base_manager_name = 'public_objects'

    def __str__(self):
        return "OrganismIndex " + str(self.pk) + ": " + self.organism.name + \
            ' [' + self.index_type + '] - ' + str(self.salmon_version)

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # only rows with is_public=True

    # Relations
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)

    # ex., "TRANSCRIPTOME_LONG", "TRANSCRIPTOME_SHORT"
    index_type = models.CharField(max_length=255)

    # This corresponds to Ensembl's release number:
    # http://ensemblgenomes.org/info/about/release_cycle
    # Determined by hitting:
    # http://rest.ensembl.org/info/software?content-type=application/json
    source_version = models.CharField(max_length=255, default="93")

    # The name of the genome assembly used which corresponds to 'GRCh38' in:
    # ftp://ftp.ensembl.org/pub/release-93/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
    assembly_name = models.CharField(max_length=255, default="UNKNOWN")

    # This matters, for instance salmon 0.9.0 indexes don't work with 0.10.0
    salmon_version = models.CharField(max_length=255, default=CURRENT_SALMON_VERSION)

    # We keep the directory unextracted on the shared filesystem so all
    # Salmon jobs can access it.
    absolute_directory_path = models.CharField(max_length=255, blank=True, null=True, default="")

    # S3 Information
    s3_url = models.CharField(max_length=255, default="")

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OrganismIndex, self).save(*args, **kwargs)
"""
# Files
These are the database representations of files
which live on local disk, on ephemeral storage,
or on AWS cloud services.
"""
class OriginalFile(models.Model):
    """ A representation of a file from an external source """
    class Meta:
        db_table = "original_files"
        indexes = [
            models.Index(fields=['filename']),
            models.Index(fields=['source_filename']),
        ]

    def __str__(self):
        return "OriginalFile: " + self.get_display_name()

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # only rows with is_public=True

    # File Properties
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    size_in_bytes = models.BigIntegerField(blank=True, null=True)
    sha1 = models.CharField(max_length=64)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Relations
    samples = models.ManyToManyField('Sample', through='OriginalFileSampleAssociation')
    processor_jobs = models.ManyToManyField('data_refinery_common.ProcessorJob', through='ProcessorJobOriginalFileAssociation')
    downloader_jobs = models.ManyToManyField('data_refinery_common.DownloaderJob', through='DownloaderJobOriginalFileAssociation')

    # Historical Properties
    source_url = models.TextField()
    is_archive = models.BooleanField(default=True)
    source_filename = models.CharField(max_length=255, blank=False)

    # Scientific Properties
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Crunch Properties
    is_downloaded = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OriginalFile, self).save(*args, **kwargs)

    def set_downloaded(self, absolute_file_path, filename=None):
        """ Marks the file as downloaded, if `filename` is not provided it will
        be parsed from the `absolute_file_path` """
        self.is_downloaded = True
        self.is_archive = FileUtils.is_archive(absolute_file_path)
        self.absolute_file_path = absolute_file_path
        self.filename = filename if filename else os.path.basename(absolute_file_path)
        self.calculate_size()
        self.calculate_sha1()
        self.save()

    def calculate_sha1(self) -> None:
        """ Calculate the SHA1 value of a given file.
        """
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> None:
        """ Calculate the number of bytes in a given file.
        """
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def get_display_name(self):
        """ For dev convenience """
        if not self.filename:
            return self.source_filename
        else:
            return self.filename

    def get_extension(self):
        """ Returns the lowercased extension of the filename
        Thanks to https://stackoverflow.com/a/541408/763705 """
        return FileUtils.get_extension(self.filename)

    def is_blacklisted(self):
        """Returns True for file types we never process (metadata-only formats)."""
        return self.get_extension() in [".xml", ".chp", ".exp"]

    def delete_local_file(self):
        """ Deletes this file from the local file system."""
        try:
            os.remove(self.absolute_file_path)
        except OSError:
            # Already gone (or never existed locally) -- that's fine.
            pass
        except TypeError:
            # absolute_file_path was None.
            pass
        except Exception as e:
            logger.exception("Unexpected delete file exception.",
                             absolute_file_path=self.absolute_file_path
                             )
        self.is_downloaded = False
        self.save()

    def has_blocking_jobs(self, own_processor_id=None) -> bool:
        """Returns True if any processor job should prevent this file from
        being processed again (a no-retry job, or one still in flight)."""
        # If the file has a processor job that should not have been
        # retried, then it still shouldn't be retried.
        no_retry_processor_jobs = self.processor_jobs.filter(no_retry=True)
        # If the file has a processor job that hasn't even started
        # yet, then it doesn't need another.
        incomplete_processor_jobs = self.processor_jobs.filter(
            end_time__isnull=True,
            success__isnull=True,
            retried=False
        )
        if own_processor_id:
            incomplete_processor_jobs = incomplete_processor_jobs.exclude(id=own_processor_id)
        # Check if there's any jobs which should block another
        # processing attempt.
        blocking_jobs = no_retry_processor_jobs | incomplete_processor_jobs
        return blocking_jobs.first() is not None

    def needs_processing(self, own_processor_id=None) -> bool:
        """Returns False if original_file has been or is being processed.
        Returns True otherwise.
        If own_processor_id is supplied then it will be ignored so
        that processor jobs can use this function without their job
        being counted as currently processing this file.
        """
        sample = self.samples.first()
        if not sample:
            return True
        if self.has_blocking_jobs(own_processor_id):
            return False
        if sample.source_database == "SRA":
            computed_file = sample.get_most_recent_smashable_result_file()
            # If there's no smashable file then we should check the quant.sf file.
            if not computed_file:
                computed_file = sample.get_most_recent_quant_sf_file()
            # If there's neither a quant.sf file nor a smashable file
            # then we definitely need to process it.
            if not computed_file:
                return True
            if computed_file.s3_bucket and computed_file.s3_key \
                    and computed_file.result.organism_index is not None \
                    and computed_file.result.organism_index.salmon_version == CURRENT_SALMON_VERSION:
                # The file is on S3 and was computed with the current
                # salmon version, so it does not need reprocessing. (If
                # it had been computed with an older salmon it should be
                # rerun with the latest version, which is the fall-through
                # `return True` below.)
                return False
        else:
            # If this original_file has multiple samples (is an
            # archive), and any of them haven't been processed, we'll
            # need the entire archive in order to process any of them.
            # A check to not re-process the already processed
            # samples in the archive will happen elsewhere before
            # dispatching.
            for sample in self.samples.all():
                if not sample.is_processed:
                    return True
                computed_file = sample.get_most_recent_smashable_result_file()
                if not computed_file:
                    return True
                if settings.RUNNING_IN_CLOUD \
                        and (computed_file.s3_bucket is None or computed_file.s3_key is None):
                    return True
            return False
        # If we aren't sure, prefer reprocessing over never processing.
        return True

    def needs_downloading(self, own_processor_id=None) -> bool:
        """Determine if a file needs to be downloaded.
        This is true if the file has already been downloaded and lost
        without getting processed.
        """
        # If the file is downloaded and the file actually exists on disk,
        # then it doesn't need to be downloaded.
        if self.absolute_file_path \
                and os.path.exists(self.absolute_file_path):
            return False
        unstarted_downloader_jobs = self.downloader_jobs.filter(
            start_time__isnull=True,
            success__isnull=True,
            retried=False
        )
        # If the file has a downloader job that hasn't even started yet,
        # then it doesn't need another.
        if unstarted_downloader_jobs.count() > 0:
            return False
        # If this file has been processed, then it doesn't need to be downloaded again.
        return self.needs_processing(own_processor_id)

    def is_affy_data(self) -> bool:
        """Return true if original_file is a CEL file or a gzipped CEL file.
        """
        upper_name = self.source_filename.upper()
        return (len(upper_name) > 4 and upper_name[-4:] == ".CEL") \
            or (len(upper_name) > 7 and upper_name[-7:] == ".CEL.GZ")
class ComputedFile(models.Model):
    """ A representation of a file created by a data-refinery process """
    class Meta:
        db_table = "computed_files"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=['filename']),
        ]

    def __str__(self):
        return "ComputedFile: " + str(self.filename)

    # SVD algorithms that may have been used to generate a compendium file.
    SVD_ALGORITHM_CHOICES = (
        ('NONE', 'None'),
        ('RANDOMIZED', 'randomized'),
        ('ARPACK', 'arpack'),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # only rows with is_public=True

    # Object relations
    samples = models.ManyToManyField('Sample', through='SampleComputedFileAssociation')

    # File related
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    # TODO: make this work w/ migrations:
    # absolute_file_path = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField()
    sha1 = models.CharField(max_length=64)

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)

    # Scientific
    is_smashable = models.BooleanField(default=False)
    is_qc = models.BooleanField(default=False)
    is_qn_target = models.BooleanField(default=False)

    # Compendia details
    quant_sf_only = models.BooleanField(default=False)
    is_compendia = models.BooleanField(default=False)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text='The SVD algorithm that was used to generate the file.'
    )
    compendia_organism = models.ForeignKey(Organism,
                                           blank=True,
                                           null=True,
                                           on_delete=models.CASCADE
                                           )
    compendia_version = models.IntegerField(blank=True, null=True)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputedFile, self).save(*args, **kwargs)

    def sync_to_s3(self, s3_bucket=None, s3_key=None) -> bool:
        """ Syncs a file to AWS S3.

        Returns True on success (uploads are skipped entirely outside the
        cloud). On failure the in-memory s3_bucket/s3_key are cleared and
        False is returned; the model is only saved on success.
        """
        if not settings.RUNNING_IN_CLOUD:
            return True
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        try:
            S3.upload_file(
                self.absolute_file_path,
                s3_bucket,
                s3_key,
                ExtraArgs={
                    'ACL': 'public-read',
                    'StorageClass': 'STANDARD_IA'
                }
            )
            self.save()
        except Exception:
            logger.exception('Error uploading computed file to S3',
                             computed_file_id=self.pk,
                             s3_key=self.s3_key,
                             s3_bucket=self.s3_bucket
                             )
            self.s3_bucket = None
            self.s3_key = None
            return False
        return True

    def sync_from_s3(self, force=False, path=None):
        """ Downloads a file from S3 to the local file system.
        Returns the absolute file path (or None if the file could not be
        obtained). If `path` is omitted, `self.absolute_file_path` is used
        as the destination.
        """
        path = path if path is not None else self.absolute_file_path
        if not settings.RUNNING_IN_CLOUD and not force:
            if os.path.exists(path):
                return path
            else:
                # If the file doesn't exist at path and we're not
                # running in the cloud, then the file is almost
                # certainly at its absolute_file_path because it never got deleted.
                if os.path.exists(self.absolute_file_path):
                    shutil.copyfile(self.absolute_file_path, path)
                    return path
                else:
                    # We don't have the file :(
                    return None
        # BUGFIX: the directory that must exist is the one containing the
        # download destination `path`, which the caller may have pointed
        # somewhere other than self.absolute_file_path.
        target_directory = os.path.dirname(path)
        os.makedirs(target_directory, exist_ok=True)
        try:
            S3.download_file(
                self.s3_bucket,
                self.s3_key,
                path
            )
            # Verify sync integrity
            synced_sha1 = calculate_sha1(path)
            if self.sha1 != synced_sha1:
                raise AssertionError("SHA1 of downloaded ComputedFile doesn't match database SHA1!")
            return path
        except Exception as e:
            logger.exception(e, computed_file_id=self.pk)
            return None

    def change_s3_location(self, new_bucket: str, new_key: str) -> bool:
        """Moves the file from its current location in S3.
        The new location will be set based on `new_bucket` and
        `new_key`. The s3_bucket and s3_key properties will be updated
        to reflect this on a successful move.
        """
        old_bucket = self.s3_bucket
        old_key = self.s3_key
        copy_source = {
            'Bucket': old_bucket,
            'Key': old_key
        }
        try:
            S3.copy_object(Bucket=new_bucket,
                           CopySource=copy_source,
                           Key=new_key)
        except Exception:
            logger.exception("Could not copy computed file within S3",
                             computed_file_id=self.id,
                             source_bucket=old_bucket,
                             source_key=old_key,
                             destination_bucket=new_bucket,
                             destination_key=new_key)
            return False
        try:
            self.s3_bucket = new_bucket
            self.s3_key = new_key
            self.save()
        except Exception:
            logger.exception("Could not save computed file after it was copied!!!",
                             computed_file_id=self.id,
                             source_bucket=old_bucket,
                             source_key=old_key,
                             destination_bucket=new_bucket,
                             destination_key=new_key)
            return False
        try:
            S3.delete_object(Bucket=old_bucket, Key=old_key)
        except Exception:
            logger.exception("Could not delete computed file after it was copied and saved!!!",
                             computed_file_id=self.id,
                             source_bucket=old_bucket,
                             source_key=old_key,
                             destination_bucket=new_bucket,
                             destination_key=new_key)
            return False
        return True

    def calculate_sha1(self) -> None:
        """ Calculate the SHA1 value of a given file.
        """
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> None:
        """ Calculate the number of bytes in a given file.
        """
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def delete_local_file(self, force=False):
        """ Deletes a file from the path and actually removes it from the file system."""
        if not settings.RUNNING_IN_CLOUD and not force:
            return
        try:
            os.remove(self.absolute_file_path)
        except OSError:
            # Already gone (or never existed locally) -- that's fine.
            pass
        except TypeError:
            # absolute_file_path was None.
            pass
        except Exception:
            logger.exception("Unexpected delete file exception.",
                             absolute_file_path=self.absolute_file_path
                             )

    def delete_s3_file(self, force=False):
        """Deletes this file's S3 object; returns True on success."""
        # If we're not running in the cloud then we shouldn't try to
        # delete something from S3 unless force is set.
        if not settings.RUNNING_IN_CLOUD and not force:
            return False
        try:
            S3.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
            return True
        except Exception:
            logger.exception("Failed to delete S3 object for Computed File.",
                             computed_file=self.id,
                             s3_object=self.s3_key)
            return False

    def get_synced_file_path(self, force=False, path=None):
        """ Fetches the absolute file path to this ComputedFile, fetching from S3 if it
        isn't already available locally. """
        if path:
            if os.path.exists(path):
                return path
            else:
                return self.sync_from_s3(force, path)
        else:
            if os.path.exists(self.absolute_file_path):
                return self.absolute_file_path
            else:
                return self.sync_from_s3(force)

    @property
    def s3_url(self):
        """ Render the resulting HTTPS URL for the S3 object."""
        return self.get_s3_url()

    def get_s3_url(self):
        """ Render the resulting HTTPS URL for the S3 object."""
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def download_url(self):
        """ A temporary URL from which the file can be downloaded. """
        return self.create_download_url()

    def create_download_url(self):
        """ Create a temporary URL from which the file can be downloaded."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod='get_object',
                Params={
                    'Bucket': self.s3_bucket,
                    'Key': self.s3_key
                },
                ExpiresIn=(60 * 60 * 7 * 24)  # 7 days in seconds.
            )
        else:
            return None
class Dataset(models.Model):
    """ A Dataset is a desired set of experiments/samples to smash and download """

    AGGREGATE_CHOICES = (
        ('ALL', 'All'),
        ('EXPERIMENT', 'Experiment'),
        ('SPECIES', 'Species')
    )

    SCALE_CHOICES = (
        ('NONE', 'None'),
        ('MINMAX', 'Minmax'),
        ('STANDARD', 'Standard'),
        ('ROBUST', 'Robust'),
    )

    SVD_ALGORITHM_CHOICES = (
        ('NONE', 'None'),
        ('RANDOMIZED', 'randomized'),
        ('ARPACK', 'arpack'),
    )

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Experiments and samples live here: {'E-ABC-1': ['SAMP1', 'SAMP2']}
    # This isn't going to be queryable, so we can use JSON-in-text, just make
    # sure we validate properly in and out!
    data = JSONField(default=dict, help_text="This is a dictionary where the keys are experiment accession codes and the values are lists with sample accession codes. Eg: `{'E-ABC-1': ['SAMP1', 'SAMP2']}`")

    # Processing properties
    aggregate_by = models.CharField(max_length=255, choices=AGGREGATE_CHOICES, default="EXPERIMENT", help_text="Specifies how samples are [aggregated](http://docs.refine.bio/en/latest/main_text.html#aggregations).")
    scale_by = models.CharField(max_length=255, choices=SCALE_CHOICES, default="NONE", help_text="Specifies options for [transformations](http://docs.refine.bio/en/latest/main_text.html#transformations).")
    quantile_normalize = models.BooleanField(
        default=True,
        help_text="Part of the advanced options. Allows [skipping quantile normalization](http://docs.refine.bio/en/latest/faq.html#what-does-it-mean-to-skip-quantile-normalization-for-rna-seq-samples) for RNA-Seq samples."
    )
    quant_sf_only = models.BooleanField(default=False, help_text="Include only quant.sf files in the generated dataset.")
    svd_algorithm = models.CharField(max_length=255, choices=SVD_ALGORITHM_CHOICES, default="NONE", help_text="Specifies choice of SVD algorithm")

    # State properties
    is_processing = models.BooleanField(default=False)  # Data is still editable when False
    is_processed = models.BooleanField(default=False)  # Result has been made
    is_available = models.BooleanField(default=False)  # Result is ready for delivery
    processor_jobs = models.ManyToManyField('data_refinery_common.ProcessorJob', through='ProcessorJobDataSetAssociation')

    # Fail handling
    success = models.NullBooleanField(null=True)
    failure_reason = models.TextField()

    # Delivery properties
    email_address = models.CharField(max_length=255, blank=True, null=True)
    email_ccdl_ok = models.BooleanField(default=False)
    email_sent = models.BooleanField(default=False)  # Result has been made
    expires_on = models.DateTimeField(blank=True, null=True)

    # Deliverables
    s3_bucket = models.CharField(max_length=255)
    s3_key = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField(blank=True, null=True, default=0, help_text="Contains the size in bytes of the processed dataset.")
    sha1 = models.CharField(max_length=64, null=True, default='')

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(Dataset, self).save(*args, **kwargs)

    def get_samples(self):
        """ Returns all of the Sample objects in this Dataset """
        all_samples = []
        for sample_list in self.data.values():
            all_samples = all_samples + sample_list
        # De-duplicate accession codes across experiments.
        all_samples = list(set(all_samples))
        return Sample.objects.filter(accession_code__in=all_samples)

    def get_experiments(self):
        """ Returns all of the Experiments objects in this Dataset """
        all_experiments = self.data.keys()
        return Experiment.objects.filter(accession_code__in=all_experiments)

    def get_samples_by_experiment(self):
        """ Returns a dict of sample QuerySets, for samples grouped by experiment. """
        all_samples = {}
        for experiment, samples in self.data.items():
            all_samples[experiment] = Sample.objects.filter(accession_code__in=samples)
        return all_samples

    def get_samples_by_species(self):
        """ Returns a dict of sample QuerySets, for samples grouped by species. """
        by_species = {}
        all_samples = self.get_samples()
        for sample in all_samples:
            if not by_species.get(sample.organism.name, None):
                by_species[sample.organism.name] = [sample]
            else:
                by_species[sample.organism.name].append(sample)
        return by_species

    def get_aggregated_samples(self):
        """ Uses aggregate_by to return a smasher-ready sample dict. """
        if self.aggregate_by == "ALL":
            return {'ALL': self.get_samples()}
        elif self.aggregate_by == "EXPERIMENT":
            return self.get_samples_by_experiment()
        else:
            return self.get_samples_by_species()

    def is_cross_technology(self):
        """ Determine if this involves both Microarray + RNASeq"""
        if len(self.get_samples().values('technology').distinct()) > 1:
            return True
        else:
            return False

    @property
    def download_url(self):
        """ A temporary URL from which the file can be downloaded. """
        return self.create_download_url()

    def create_download_url(self):
        """ Create a temporary URL from which the file can be downloaded."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod='get_object',
                Params={
                    'Bucket': self.s3_bucket,
                    'Key': self.s3_key
                },
                ExpiresIn=(60 * 60 * 7 * 24)  # 7 days in seconds.
            )
        else:
            return None

    # NOTE(review): unlike ComputedFile.s3_url, this is a plain method, not
    # a property -- callers must invoke it. Confirm before unifying.
    def s3_url(self):
        """ Render the resulting S3 URL """
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def has_email(self):
        """ Returns if the email is set or not """
        return bool(self.email_address)
class APIToken(models.Model):
    """ Required for starting a smash job """

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Activation
    # A token is only usable once the holder has accepted the terms.
    is_activated = models.BooleanField(default=False)

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(APIToken, self).save(*args, **kwargs)

    @property
    def terms_and_conditions(self):
        """The terms-and-conditions text the token holder must accept."""
        return settings.TERMS_AND_CONDITIONS
"""
# Associations
These represent the relationships between items in the other tables.
"""
class ExperimentSampleAssociation(models.Model):
    """Join table linking an Experiment to one of its Samples."""
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_sample_associations"
        unique_together = ('experiment', 'sample')
class ExperimentOrganismAssociation(models.Model):
    """Join table linking an Experiment to an Organism it covers."""
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_organism_associations"
        unique_together = ('experiment', 'organism')
class DownloaderJobOriginalFileAssociation(models.Model):
    """Join table linking a DownloaderJob to an OriginalFile it downloads."""
    downloader_job = models.ForeignKey(
        "data_refinery_common.DownloaderJob", blank=False, null=False, on_delete=models.CASCADE)
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "downloaderjob_originalfile_associations"
        unique_together = ('downloader_job', 'original_file')
class ProcessorJobOriginalFileAssociation(models.Model):
    """Join table linking a ProcessorJob to an OriginalFile it processes."""
    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE)
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_originalfile_associations"
        unique_together = ('processor_job', 'original_file')
class ProcessorJobDatasetAssociation(models.Model):
    """Join table linking a ProcessorJob (e.g. a smash job) to its Dataset.

    NOTE(review): unlike the sibling association tables this one has no
    unique_together constraint, so duplicate rows are possible -- confirm
    whether that is intentional before adding one (it would require a
    migration).
    """
    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE)
    dataset = models.ForeignKey(Dataset, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_dataset_associations"
class OriginalFileSampleAssociation(models.Model):
    """Join table linking an OriginalFile to a Sample it contains."""
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "original_file_sample_associations"
        unique_together = ('original_file', 'sample')
class SampleResultAssociation(models.Model):
    """Join table linking a Sample to a ComputationalResult derived from it."""
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "sample_result_associations"
        unique_together = ('result', 'sample')
class SampleComputedFileAssociation(models.Model):
    """Join table linking a Sample to a ComputedFile produced for it."""
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    computed_file = models.ForeignKey(
        ComputedFile, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "sample_computed_file_associations"
        unique_together = ('sample', 'computed_file')
class ExperimentResultAssociation(models.Model):
    """Join table linking an Experiment to a ComputationalResult."""
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_result_associations"
        unique_together = ('result', 'experiment')
<|code_end|>
workers/data_refinery_workers/processors/management/commands/create_qn_target.py
<|code_start|>import requests
import sys
from django.core.management.base import BaseCommand
from django.db.models import Count
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Dataset,
Experiment,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SampleComputedFileAssociation,
)
from data_refinery_workers.processors import qn_reference, utils
logger = get_and_configure_logger(__name__)
class Command(BaseCommand):
    """Management command that builds a quantile-normalization (QN) reference
    target for one organism (or every organism with enough processed
    microarray samples), or re-runs an existing QN_REFERENCE job by id."""

    def add_arguments(self, parser):
        parser.add_argument(
            "--organism",
            type=str,
            help=("Name of organism"))
        parser.add_argument(
            "--all",
            default=False,
            action='store_true',
            help=("Perform for all organisms that meet <min> processed samples"))
        parser.add_argument(
            "--min",
            type=int,
            default=100,
            help=("Minimum number of processed samples"))
        parser.add_argument(
            "--platform",
            type=str,
            default=None,
            help=("Name of platform")
        )
        parser.add_argument(
            "--job_id",
            type=int,
            default=None,
            help=("ID of job to run.")
        )

    def handle(self, *args, **options):
        """Dispatch: run an existing job when --job_id is given, otherwise
        create a new QN reference job per qualifying organism."""
        if options["job_id"]:
            qn_reference.create_qn_reference(options["job_id"])
            return

        if options["organism"] is None and not options["all"]:
            logger.error("You must specify an organism or --all")
            sys.exit(1)

        if options["organism"] and options["organism"] != "ALL":
            organisms = [Organism.get_object_for_name(options["organism"].upper())]
        else:
            organisms = Organism.objects.all()

        for organism in organisms:
            self._create_qn_target_for_organism(organism, options)

    def _create_qn_target_for_organism(self, organism, options):
        """Create and immediately run a QN_REFERENCE processor job for a
        single organism, if it has enough processed microarray samples."""
        samples = Sample.processed_objects.filter(
            organism=organism,
            has_raw=True,
            technology="MICROARRAY",
            is_processed=True)
        # Hoist the count: one COUNT query instead of up to three.
        sample_count = samples.count()
        if sample_count == 0:
            logger.error("No processed samples for organism.",
                         organism=organism,
                         count=sample_count
                         )
            return
        if sample_count < options['min']:
            logger.error("Processed samples don't meet minimum threshold",
                         organism=organism,
                         count=sample_count,
                         min=options["min"]
                         )
            return

        if options["platform"] is None:
            # Default to the platform with the most processed samples.
            platform_counts = samples.values('platform_accession_code').annotate(
                dcount=Count('platform_accession_code')).order_by('-dcount')
            biggest_platform = platform_counts[0]['platform_accession_code']
        else:
            biggest_platform = options["platform"]

        sample_codes_results = Sample.processed_objects.filter(
            platform_accession_code=biggest_platform,
            has_raw=True,
            technology="MICROARRAY",
            organism=organism,
            is_processed=True).values('accession_code')
        sample_codes = [res['accession_code'] for res in sample_codes_results]

        dataset = Dataset()
        dataset.data = {organism.name + '_(' + biggest_platform + ')': sample_codes}
        dataset.aggregate_by = "ALL"
        dataset.scale_by = "NONE"
        dataset.quantile_normalize = False
        dataset.save()

        job = ProcessorJob()
        job.pipeline_applied = "QN_REFERENCE"
        # NOTE(review): nomad_job_id is never set for jobs created here (the
        # job runs in-process rather than being dispatched through Nomad),
        # which makes these rows hard to correlate with Nomad later --
        # consider recording an identifier like dispatched jobs do; confirm
        # the desired value before changing.
        job.save()

        pjda = ProcessorJobDatasetAssociation()
        pjda.processor_job = job
        pjda.dataset = dataset
        pjda.save()

        final_context = qn_reference.create_qn_reference(job.pk)

        if final_context['success']:
            print(":D")
            self.stdout.write("Target file: " + final_context['target_file'])
            self.stdout.write("Target S3: " + str(final_context['computed_files'][0].get_s3_url()))
        else:
            print(":(")
<|code_end|>
workers/data_refinery_workers/processors/management/commands/remove_invalid_computational_results.py
<|code_start|><|code_end|>
| common/data_refinery_common/models/models.py
<|code_start|>import hashlib
import io
import os
import shutil
import pytz
import uuid
import boto3
from botocore.client import Config
from datetime import datetime
from functools import partial
from typing import Dict, Set
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.expressions import F, Q
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import transaction
from django.db import models
from django.utils import timezone
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.organism import Organism
from data_refinery_common.utils import get_env_variable, get_s3_url, calculate_file_size, calculate_sha1, FileUtils
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client('s3', config=Config(signature_version='s3v4'))
logger = get_and_configure_logger(__name__)
# Root of the shared local data store used by workers.
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = 'salmon ' + get_env_variable("SALMON_VERSION", "0.13.1")
CHUNK_SIZE = 1024 * 256  # chunk_size is in bytes
"""
# First Order Classes
This represent the primary data types we will be querying
and filtering against.
"""
class PublicObjectsManager(models.Manager):
    """Model manager that restricts the default queryset to public rows.

    Models exposing this as `public_objects` only surface records whose
    `is_public` flag is set.
    """
    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset.filter(is_public=True)
class ProcessedObjectsManager(models.Manager):
    """Model manager limited to rows that are both processed and public."""
    def get_queryset(self):
        base = super().get_queryset()
        return base.filter(is_processed=True, is_public=True)
class Sample(models.Model):
    """
    An individual sample: one biological specimen / assay run pulled from an
    external repository (ArrayExpress, SRA, GEO), linked to its experiments,
    original source files, and any files computed from it.
    """
    class Meta:
        db_table = "samples"
        base_manager_name = "public_objects"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=['accession_code']),
        ]
    def __str__(self):
        return self.accession_code
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_objects = ProcessedObjectsManager()
    # Identifiers
    accession_code = models.CharField(max_length=255, unique=True)
    title = models.CharField(max_length=255, unique=False, blank=True)
    # Relations
    organism = models.ForeignKey(Organism, blank=True, null=True, on_delete=models.SET_NULL)
    results = models.ManyToManyField('ComputationalResult', through='SampleResultAssociation')
    original_files = models.ManyToManyField('OriginalFile', through='OriginalFileSampleAssociation')
    computed_files = models.ManyToManyField('ComputedFile', through='SampleComputedFileAssociation')
    experiments = models.ManyToManyField('Experiment', through='ExperimentSampleAssociation')
    # Historical Properties
    source_database = models.CharField(max_length=255, blank=False)
    source_archive_url = models.CharField(max_length=255)
    source_filename = models.CharField(max_length=255, blank=False)
    source_absolute_file_path = models.CharField(max_length=255)
    has_raw = models.BooleanField(default=True) # Did this sample have a raw data source?
    # Technological Properties
    platform_accession_code = models.CharField(max_length=256, blank=True)
    platform_name = models.CharField(max_length=256, blank=True)
    technology = models.CharField(max_length=256, blank=True) # MICROARRAY, RNA-SEQ
    manufacturer = models.CharField(max_length=256, blank=True)
    protocol_info = JSONField(default=dict)
    # Scientific Properties
    sex = models.CharField(max_length=255, blank=True)
    # NOTE(review): DecimalField ignores max_length; kept as-is so the
    # migration history stays unchanged.
    age = models.DecimalField(max_length=255, blank=True, max_digits=8, decimal_places=3, null=True)
    specimen_part = models.CharField(max_length=255, blank=True)
    genotype = models.CharField(max_length=255, blank=True)
    disease = models.CharField(max_length=255, blank=True)
    disease_stage = models.CharField(max_length=255, blank=True)
    cell_line = models.CharField(max_length=255, blank=True)
    treatment = models.CharField(max_length=255, blank=True)
    race = models.CharField(max_length=255, blank=True)
    subject = models.CharField(max_length=255, blank=True)
    compound = models.CharField(max_length=255, blank=True)
    time = models.CharField(max_length=255, blank=True)
    # Crunch Properties
    is_processed = models.BooleanField(default=False)
    # Blacklisting
    is_blacklisted = models.BooleanField(default=False)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(Sample, self).save(*args, **kwargs)
    def to_metadata_dict(self):
        """Render this Sample as a dict of refinebio_*-prefixed metadata fields."""
        metadata = {}
        metadata['refinebio_title'] = self.title
        metadata['refinebio_accession_code'] = self.accession_code
        metadata['refinebio_organism'] = self.organism.name if self.organism else None
        metadata['refinebio_source_database'] = self.source_database
        metadata['refinebio_source_archive_url'] = self.source_archive_url
        metadata['refinebio_sex'] = self.sex
        metadata['refinebio_age'] = self.age or ''
        metadata['refinebio_specimen_part'] = self.specimen_part
        metadata['refinebio_genetic_information'] = self.genotype
        metadata['refinebio_disease'] = self.disease
        metadata['refinebio_disease_stage'] = self.disease_stage
        metadata['refinebio_cell_line'] = self.cell_line
        metadata['refinebio_treatment'] = self.treatment
        metadata['refinebio_race'] = self.race
        metadata['refinebio_subject'] = self.subject
        metadata['refinebio_compound'] = self.compound
        metadata['refinebio_time'] = self.time
        metadata['refinebio_platform'] = self.pretty_platform
        metadata['refinebio_annotations'] = [
            data for data in self.sampleannotation_set.all().values_list('data', flat=True)
        ]
        return metadata
    # Returns a set of ProcessorJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_processor_jobs(self) -> Set:
        """Return the set of ProcessorJobs attached to any of this sample's original files."""
        processor_jobs = set()
        for original_file in self.original_files.prefetch_related("processor_jobs").all():
            for processor_job in original_file.processor_jobs.all():
                processor_jobs.add(processor_job)
        return processor_jobs
    # Returns a set of DownloaderJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_downloader_jobs(self) -> Set:
        """Return the set of DownloaderJobs attached to any of this sample's original files."""
        downloader_jobs = set()
        for original_file in self.original_files.prefetch_related("downloader_jobs").all():
            for downloader_job in original_file.downloader_jobs.all():
                downloader_jobs.add(downloader_job)
        return downloader_jobs
    def get_result_files(self):
        """ Get all of the ComputedFile objects associated with this Sample """
        return self.computed_files.all()
    def get_most_recent_smashable_result_file(self):
        """Get the most recent public, smashable ComputedFile for this Sample.

        Returns None when no such file exists yet. Previously this swallowed
        every exception; only the "no rows" case is expected, so only
        DoesNotExist is caught now and real errors propagate.
        """
        try:
            # .latest() executes the query immediately and raises
            # ComputedFile.DoesNotExist when the queryset is empty.
            return self.computed_files.filter(
                is_public=True,
                is_smashable=True,
            ).latest()
        except ComputedFile.DoesNotExist:
            # This sample has no smashable files yet.
            return None
    def get_most_recent_quant_sf_file(self):
        """ Returns the latest quant.sf file that was generated for this sample.
        Note: We don't associate that file to the computed_files of this sample, that's
        why we have to go through the computational results. """
        return ComputedFile.objects\
            .filter(result__in=self.results.all(), filename='quant.sf')\
            .order_by('-created_at')\
            .first()
    @property
    def pretty_platform(self):
        """ Turns
        [HT_HG-U133_Plus_PM] Affymetrix HT HG-U133+ PM Array Plate
        into
        Affymetrix HT HG-U133+ PM Array Plate (hthgu133pluspm)
        """
        if ']' in self.platform_name:
            platform_base = self.platform_name.split(']')[1].strip()
        else:
            platform_base = self.platform_name
        return platform_base + ' (' + self.platform_accession_code + ')'
class SampleAnnotation(models.Model):
    """ Semi-standard information associated with a Sample.

    Holds free-form JSON metadata harvested from the source repository that
    does not fit Sample's structured columns.
    """
    class Meta:
        db_table = "sample_annotations"
        base_manager_name = 'public_objects'
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # Relations
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    # Properties
    # Arbitrary key/value metadata; exact shape depends on the source repository.
    data = JSONField(default=dict)
    # Presumably marks annotations generated internally (CCDL) rather than by
    # the submitter -- TODO confirm against the code that sets it.
    is_ccdl = models.BooleanField(default=False)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(SampleAnnotation, self).save(*args, **kwargs)
class ProcessedPublicObjectsManager(models.Manager):
    """Manager for public Experiments having at least one processed Sample.

    Uses the cached `num_processed_samples` counter instead of joining
    against the samples table.
    """
    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset.filter(is_public=True, num_processed_samples__gt=0)
class Experiment(models.Model):
    """ An Experiment or Study: a collection of Samples submitted together
    to a source repository (ArrayExpress, SRA, GEO). Caches several
    aggregate counters and lists so list views don't have to join. """
    class Meta:
        db_table = "experiments"
        base_manager_name = 'public_objects'
    def __str__(self):
        return "Experiment: " + self.accession_code
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()
    # Relations
    samples = models.ManyToManyField('Sample', through='ExperimentSampleAssociation')
    organisms = models.ManyToManyField('Organism', through='ExperimentOrganismAssociation')
    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)
    # Historical Properties
    source_database = models.CharField(max_length=32) # "ArrayExpress, "SRA", "GEO"
    source_url = models.TextField()
    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)
    # Cached Computed Properties
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    organism_names = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        # GEO and ArrayExpress mirror each other's studies; derive the
        # alternate accession (GSE123 <-> E-GEOD-123) when it isn't set.
        if self.accession_code and not self.alternate_accession_code:
            if self.accession_code.startswith('GSE'):
                self.alternate_accession_code = 'E-GEOD-' + self.accession_code[3:]
            elif self.accession_code.startswith('E-GEOD-'):
                self.alternate_accession_code = 'GSE' + self.accession_code[7:]
        return super(Experiment, self).save(*args, **kwargs)
    def update_num_samples(self):
        """ Update our cached sample counters and persist them. """
        aggregates = self.samples.aggregate(
            num_total_samples=Count('id'),
            num_processed_samples=Count('id', filter=Q(is_processed=True)),
            # "Downloadable" additionally requires the organism to have a QN target.
            num_downloadable_samples=Count('id', filter=Q(is_processed=True, organism__qn_target__isnull=False))
        )
        self.num_total_samples = aggregates['num_total_samples']
        self.num_processed_samples = aggregates['num_processed_samples']
        self.num_downloadable_samples = aggregates['num_downloadable_samples']
        self.save()
    def to_metadata_dict(self):
        """ Render this Experiment as a dict """
        metadata = {}
        metadata['title'] = self.title
        metadata['accession_code'] = self.accession_code
        metadata['organisms'] = [organism.name for organism in self.organisms.all()]
        metadata['description'] = self.description
        metadata['protocol_description'] = self.protocol_description
        metadata['technology'] = self.technology
        metadata['submitter_institution'] = self.submitter_institution
        metadata['has_publication'] = self.has_publication
        metadata['publication_title'] = self.publication_title
        metadata['publication_doi'] = self.publication_doi
        metadata['pubmed_id'] = self.pubmed_id
        if self.source_first_published:
            metadata['source_first_published'] = self.source_first_published.strftime(
                '%Y-%m-%dT%H:%M:%S')
        else:
            metadata['source_first_published'] = ''
        if self.source_last_modified:
            metadata['source_last_modified'] = self.source_last_modified.strftime(
                '%Y-%m-%dT%H:%M:%S')
        else:
            metadata['source_last_modified'] = ''
        return metadata
    def get_sample_metadata_fields(self):
        """ Get all metadata fields that are non-empty for at least one sample in the experiment.
        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        fields = []
        possible_fields = ['sex', 'age', 'specimen_part', 'genotype', 'disease', 'disease_stage',
                           'cell_line', 'treatment', 'race', 'subject', 'compound', 'time']
        samples = self.samples.all()
        for field in possible_fields:
            for sample in samples:
                value = getattr(sample, field)
                # PEP 8: compare against None with `is not`, not `!=`.
                if value is not None and value != '':
                    fields.append(field)
                    break
        return fields
    def update_sample_metadata_fields(self):
        self.sample_metadata_fields = self.get_sample_metadata_fields()
    def update_organism_names(self):
        self.organism_names = self.get_organism_names()
    def update_platform_names(self):
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()
    def get_sample_technologies(self):
        """ Get a list of unique technologies for all of the associated samples
        """
        return list(set([sample.technology for sample in self.samples.all()]))
    def get_organism_names(self):
        """ Get a list of unique organism names for all of the associated organisms
        """
        return list(set([organism.name for organism in self.organisms.all()]))
    def get_platform_names(self):
        """ Get a list of unique platform names for all of the associated samples
        """
        return list(set([sample.platform_name for sample in self.samples.all()]))
    def get_platform_accession_codes(self):
        """ Get a list of unique platform accession codes for all of the associated samples
        """
        return list(set([sample.platform_accession_code for sample in self.samples.all()]))
    @property
    def platforms(self):
        """ Returns a list of related platform names """
        return list(set([sample.platform_name for sample in self.samples.all()]))
    @property
    def pretty_platforms(self):
        """ Returns a prettified list of related platforms """
        return list(set([sample.pretty_platform for sample in self.samples.all()]))
    @property
    def processed_samples(self):
        """ Accession codes of this experiment's samples that have been processed. """
        return list([sample.accession_code for sample in self.samples.all() if sample.is_processed])
    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.
        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(self.samples.filter(is_processed=True, organism__qn_target__isnull=False)\
                                .values_list('accession_code', flat=True))
class ExperimentAnnotation(models.Model):
    """ Semi-standard information associated with an Experiment.

    Holds free-form JSON metadata harvested from the source repository that
    does not fit Experiment's structured columns.
    """
    class Meta:
        db_table = "experiment_annotations"
        base_manager_name = 'public_objects'
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # Relations
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    # Properties
    # Arbitrary key/value metadata; exact shape depends on the source repository.
    data = JSONField(default=dict)
    # Presumably marks annotations generated internally (CCDL) rather than by
    # the submitter -- TODO confirm against the code that sets it.
    is_ccdl = models.BooleanField(default=False)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ExperimentAnnotation, self).save(*args, **kwargs)
class Pipeline(models.Model):
    """Pipeline that is associated with a series of ComputationalResult records."""
    # Human-readable name of the pipeline.
    name = models.CharField(max_length=255)
    # Ordered identifiers of the pipeline's steps -- presumably indexes into
    # a step enumeration defined elsewhere; TODO confirm.
    steps = ArrayField(models.IntegerField(), default=list)
    class Meta:
        db_table = "pipelines"
class Processor(models.Model):
    """Processor associated with a certain ComputationalResult."""
    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64)
    docker_image = models.CharField(max_length=255)
    # Snapshot of the runtime environment; exact schema depends on the code
    # that populates it.
    environment = JSONField(default=dict)
    class Meta:
        db_table = "processors"
        # A processor row is uniquely identified by the full combination, so
        # re-running identical code in the same image reuses one row.
        unique_together = ('name', 'version', 'docker_image', 'environment')
    def __str__(self):
        return "Processor: %s (version: %s, docker_image: %s)" % (self.name, self.version, self.docker_image)
class ComputationalResult(models.Model):
    """ Meta-information about the output of a computer process. (Ex Salmon) """
    class Meta:
        db_table = "computational_results"
        base_manager_name = 'public_objects'
    def __str__(self):
        processor_name_str = ""
        if self.processor:
            processor_name_str = ": " + str(self.processor.name)
        return "ComputationalResult " + str(self.pk) + processor_name_str
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # The exact command lines that produced this result.
    commands = ArrayField(models.TextField(), default=list)
    processor = models.ForeignKey(Processor, blank=True, null=True, on_delete=models.CASCADE)
    samples = models.ManyToManyField('Sample', through='SampleResultAssociation')
    # The Organism Index used to process the sample.
    organism_index = models.ForeignKey('OrganismIndex', blank=True, null=True, on_delete=models.SET_NULL)
    is_ccdl = models.BooleanField(default=True)
    # Stats
    time_start = models.DateTimeField(blank=True, null=True)
    time_end = models.DateTimeField(blank=True, null=True)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResult, self).save(*args, **kwargs)
    def remove_computed_files_from_s3(self):
        """ Removes all associated computed files from S3. Use this before deleting a computational result. """
        for computed_file in self.computedfile_set.all():
            computed_file.delete_s3_file()
class ComputationalResultAnnotation(models.Model):
    """ Non-standard information associated with an ComputationalResult """
    class Meta:
        db_table = "computational_result_annotations"
        base_manager_name = 'public_objects'
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)
    # Properties
    # Arbitrary key/value metadata; exact shape depends on the producing processor.
    data = JSONField(default=dict)
    # Defaults to True here (unlike Sample/Experiment annotations) since
    # results are generated internally.
    is_ccdl = models.BooleanField(default=True)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResultAnnotation, self).save(*args, **kwargs)
# TODO: implement a Gene model if/when gene-level metadata is needed.
# class Gene(models.Model):
#     """ A representation of a Gene """
#     class Meta:
#         db_table = "genes"
class OrganismIndex(models.Model):
    """ A special type of process result, necessary for processing other SRA samples """
    class Meta:
        db_table = "organism_index"
        base_manager_name = 'public_objects'
    def __str__(self):
        return "OrganismIndex " + str(self.pk) + ": " + self.organism.name + \
        ' [' + self.index_type + '] - ' + str(self.salmon_version)
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # Relations
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)
    # ex., "TRANSCRIPTOME_LONG", "TRANSCRIPTOME_SHORT"
    index_type = models.CharField(max_length=255)
    # This corresponds to Ensembl's release number:
    # http://ensemblgenomes.org/info/about/release_cycle
    # Determined by hitting:
    # http://rest.ensembl.org/info/software?content-type=application/json
    source_version = models.CharField(max_length=255, default="93")
    # The name of the genome assembly used which corresponds to 'GRCh38' in:
    # ftp://ftp.ensembl.org/pub/release-93/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
    assembly_name = models.CharField(max_length=255, default="UNKNOWN")
    # This matters, for instance salmon 0.9.0 indexes don't work with 0.10.0
    salmon_version = models.CharField(max_length=255, default=CURRENT_SALMON_VERSION)
    # We keep the directory unextracted on the shared filesystem so all
    # Salmon jobs can access it.
    absolute_directory_path = models.CharField(max_length=255, blank=True, null=True, default="")
    # S3 Information
    s3_url = models.CharField(max_length=255, default="")
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OrganismIndex, self).save(*args, **kwargs)
"""
# Files
These are the database representations of files
which live on local disk, on ephemeral storage,
or on AWS cloud services.
"""
class OriginalFile(models.Model):
    """ A representation of a file from an external source """
    class Meta:
        db_table = "original_files"
        indexes = [
            models.Index(fields=['filename']),
            models.Index(fields=['source_filename']),
        ]
    def __str__(self):
        return "OriginalFile: " + self.get_display_name()
    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    # File Properties
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    size_in_bytes = models.BigIntegerField(blank=True, null=True)
    sha1 = models.CharField(max_length=64)
    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)
    # Relations
    samples = models.ManyToManyField('Sample', through='OriginalFileSampleAssociation')
    processor_jobs = models.ManyToManyField('data_refinery_common.ProcessorJob', through='ProcessorJobOriginalFileAssociation')
    downloader_jobs = models.ManyToManyField('data_refinery_common.DownloaderJob', through='DownloaderJobOriginalFileAssociation')
    # Historical Properties
    source_url = models.TextField()
    is_archive = models.BooleanField(default=True)
    source_filename = models.CharField(max_length=255, blank=False)
    # Scientific Properties
    has_raw = models.BooleanField(default=True) # Did this sample have a raw data source?
    # Crunch Properties
    is_downloaded = models.BooleanField(default=False)
    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OriginalFile, self).save(*args, **kwargs)
    def set_downloaded(self, absolute_file_path, filename = None):
        """ Marks the file as downloaded, if `filename` is not provided it will
        be parsed from the `absolute_file_path` """
        self.is_downloaded = True
        self.is_archive = FileUtils.is_archive(absolute_file_path)
        self.absolute_file_path = absolute_file_path
        self.filename = filename if filename else os.path.basename(absolute_file_path)
        self.calculate_size()
        self.calculate_sha1()
        self.save()
    def calculate_sha1(self) -> str:
        """ Calculate the SHA1 value of a given file.
        """
        # Caches the hash on the instance; not persisted until save() is called.
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1
    def calculate_size(self) -> int:
        """ Calculate the number of bytes in a given file.
        """
        # Caches the size on the instance; not persisted until save() is called.
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes
    def get_display_name(self):
        """ For dev convenience """
        if not self.filename:
            return self.source_filename
        else:
            return self.filename
    def get_extension(self):
        """ Returns the lowercased extension of the filename
        Thanks to https://stackoverflow.com/a/541408/763705 """
        return FileUtils.get_extension(self.filename)
    def is_blacklisted(self):
        """ Returns True when the file's extension marks it as one we never process. """
        return self.get_extension() in [".xml", ".chp", ".exp"]
    def delete_local_file(self):
        """ Deletes this file from the local file system."""
        try:
            os.remove(self.absolute_file_path)
        except OSError:
            # The file was already gone.
            pass
        except TypeError:
            # absolute_file_path was None.
            pass
        except Exception as e:
            logger.exception("Unexpected delete file exception.",
                absolute_file_path=self.absolute_file_path
            )
        self.is_downloaded = False
        self.save()
    def has_blocking_jobs(self, own_processor_id=None) -> bool:
        """ Returns True if a processor job exists that should prevent a new
        processing attempt for this file (a no-retry job, or one that is
        still in flight). `own_processor_id` is excluded from the check. """
        # If the file has a processor job that should not have been
        # retried, then it still shouldn't be retried.
        no_retry_processor_jobs = self.processor_jobs.filter(no_retry=True)
        # If the file has a processor job that hasn't even started
        # yet, then it doesn't need another.
        incomplete_processor_jobs = self.processor_jobs.filter(
            end_time__isnull=True,
            success__isnull=True,
            retried=False
        )
        if own_processor_id:
            incomplete_processor_jobs = incomplete_processor_jobs.exclude(id=own_processor_id)
        # Check if there's any jobs which should block another
        # processing attempt.
        blocking_jobs = no_retry_processor_jobs | incomplete_processor_jobs
        return blocking_jobs.first() is not None
    def needs_processing(self, own_processor_id=None) -> bool:
        """Returns False if original_file has been or is being processed.
        Returns True otherwise.
        If own_processor_id is supplied then it will be ignored so
        that processor jobs can use this function without their job
        being counted as currently processing this file.
        """
        sample = self.samples.first()
        if not sample:
            return True
        if self.has_blocking_jobs(own_processor_id):
            return False
        if sample.source_database == "SRA":
            computed_file = sample.get_most_recent_smashable_result_file()
            # If there's no smashable file then we should check the quant.sf file.
            if not computed_file:
                computed_file = sample.get_most_recent_quant_sf_file()
            # If there's neither a quant.sf file nor a smashable file
            # then we definitely need to process it.
            if not computed_file:
                return True
            if computed_file.s3_bucket and computed_file.s3_key \
               and computed_file.result.organism_index is not None \
               and computed_file.result.organism_index.salmon_version == CURRENT_SALMON_VERSION:
                # If the file wasn't computed with the latest
                # version of salmon, then it should be rerun
                # with the latest version of salmon.
                return False
        else:
            # If this original_file has multiple samples (is an
            # archive), and any of them haven't been processed, we'll
            # need the entire archive in order to process any of them.
            # A check to not re-process the already processed
            # samples in the archive will happen elsewhere before
            # dispatching.
            for sample in self.samples.all():
                if not sample.is_processed:
                    return True
                computed_file = sample.get_most_recent_smashable_result_file()
                if not computed_file:
                    return True
                if settings.RUNNING_IN_CLOUD \
                   and (computed_file.s3_bucket is None or computed_file.s3_key is None):
                    return True
            return False
        # If we aren't sure, prefer reprocessing over never processing.
        return True
    def needs_downloading(self, own_processor_id=None) -> bool:
        """Determine if a file needs to be downloaded.
        This is true if the file has already been downloaded and lost
        without getting processed.
        """
        # If the file is downloaded and the file actually exists on disk,
        # then it doesn't need to be downloaded.
        if self.absolute_file_path \
           and os.path.exists(self.absolute_file_path):
            return False
        unstarted_downloader_jobs = self.downloader_jobs.filter(
            start_time__isnull=True,
            success__isnull=True,
            retried=False
        )
        # If the file has a downloader job that hasn't even started yet,
        # then it doesn't need another.
        if unstarted_downloader_jobs.count() > 0:
            return False
        # If this file has been processed, then it doesn't need to be downloaded again.
        return self.needs_processing(own_processor_id)
    def is_affy_data(self) -> bool:
        """Return true if original_file is a CEL file or a gzipped CEL file.
        """
        upper_name = self.source_filename.upper()
        return (len(upper_name) > 4 and upper_name[-4:] == ".CEL") \
            or (len(upper_name) > 7 and upper_name[-7:] == ".CEL.GZ")
class ComputedFile(models.Model):
""" A representation of a file created by a data-refinery process """
class Meta:
db_table = "computed_files"
get_latest_by = "created_at"
indexes = [
models.Index(fields=['filename']),
]
def __str__(self):
return "ComputedFile: " + str(self.filename)
SVD_ALGORITHM_CHOICES = (
('NONE', 'None'),
('RANDOMIZED', 'randomized'),
('ARPACK', 'arpack'),
)
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# Object relations
samples = models.ManyToManyField('Sample', through='SampleComputedFileAssociation')
# File related
filename = models.CharField(max_length=255)
absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
# TODO: make this work w/ migrations:
# absolute_file_path = models.CharField(max_length=255)
size_in_bytes = models.BigIntegerField()
sha1 = models.CharField(max_length=64)
# Relations
result = models.ForeignKey(
ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)
# Scientific
is_smashable = models.BooleanField(default=False)
is_qc = models.BooleanField(default=False)
is_qn_target = models.BooleanField(default=False)
# Compendia details
quant_sf_only = models.BooleanField(default=False)
is_compendia = models.BooleanField(default=False)
svd_algorithm = models.CharField(
max_length=255,
choices=SVD_ALGORITHM_CHOICES,
default="NONE",
help_text='The SVD algorithm that was used to generate the file.'
)
compendia_organism = models.ForeignKey(Organism,
blank=True,
null=True,
on_delete=models.CASCADE
)
compendia_version = models.IntegerField(blank=True, null=True)
# AWS
s3_bucket = models.CharField(max_length=255, blank=True, null=True)
s3_key = models.CharField(max_length=255, blank=True, null=True)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(ComputedFile, self).save(*args, **kwargs)
    def sync_to_s3(self, s3_bucket=None, s3_key=None) -> bool:
        """ Syncs a file to AWS S3.
        Returns True on success, or when not running in the cloud (the upload
        is skipped entirely). Returns False if the upload fails; in that case
        the in-memory s3_bucket/s3_key are cleared and nothing is persisted
        (save() only runs after a successful upload).
        """
        if not settings.RUNNING_IN_CLOUD:
            return True
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        try:
            S3.upload_file(
                    self.absolute_file_path,
                    s3_bucket,
                    s3_key,
                    ExtraArgs={
                        'ACL': 'public-read',
                        'StorageClass': 'STANDARD_IA'
                    }
                )
            self.save()
        except Exception as e:
            logger.exception('Error uploading computed file to S3',
                            computed_file_id=self.pk,
                            s3_key=self.s3_key,
                            s3_bucket=self.s3_bucket
                        )
            # Roll back the in-memory location so the model doesn't claim to
            # live at an S3 location the upload never reached.
            self.s3_bucket = None
            self.s3_key = None
            return False
        return True
    def sync_from_s3(self, force=False, path=None):
        """ Downloads a file from S3 to the local file system.
        Returns the absolute file path on success, or None on failure.
        `path` defaults to this file's absolute_file_path; `force` downloads
        from S3 even outside the cloud (where a local copy would otherwise
        be reused).
        """
        path = path if path is not None else self.absolute_file_path
        if not settings.RUNNING_IN_CLOUD and not force:
            if os.path.exists(path):
                return path
            else:
                # If the file doesn't exist at path and we're not
                # running in the cloud, then the file is almost
                # certainly at its absolute_file_path because it never got deleted.
                if os.path.exists(self.absolute_file_path):
                    shutil.copyfile(self.absolute_file_path, path)
                    return path
                else:
                    # We don't have the file :(
                    return None
        target_directory = os.path.dirname(self.absolute_file_path)
        os.makedirs(target_directory, exist_ok=True)
        try:
            S3.download_file(
                        self.s3_bucket,
                        self.s3_key,
                        path
                    )
            # Verify sync integrity
            synced_sha1 = calculate_sha1(path)
            if self.sha1 != synced_sha1:
                raise AssertionError("SHA1 of downloaded ComputedFile doesn't match database SHA1!")
            return path
        except Exception as e:
            logger.exception(e, computed_file_id=self.pk)
            return None
def change_s3_location(self, new_bucket: str, new_key: str) -> bool:
    """Moves the file from its current location in S3.

    The new location will be set based on `new_bucket` and
    `new_key`. The s3_bucket and s3_key properties will be updated
    to reflect this on a successful move.

    Returns True only when all three steps -- copy, model save, and delete
    of the old object -- succeed; returns False at the first failure.
    """
    old_bucket = self.s3_bucket
    old_key = self.s3_key
    copy_source = {
        'Bucket': old_bucket,
        'Key': old_key
    }
    try:
        # Step 1: copy the object to its new location.
        response = S3.copy_object(Bucket=new_bucket,
                                  CopySource=copy_source,
                                  Key=new_key)
    except:
        # NOTE(review): bare `except:` also catches KeyboardInterrupt /
        # SystemExit -- consider narrowing to Exception.
        logger.exception("Could not copy computed file within S3",
                         computed_file_id=self.id,
                         source_bucket=old_bucket,
                         source_key=old_key,
                         destination_bucket=new_bucket,
                         destination_key=new_key)
        return False
    try:
        # Step 2: persist the new location on the model.
        self.s3_bucket = new_bucket
        self.s3_key = new_key
        self.save()
    except:
        logger.exception("Could not save computed file after it was copied!!!",
                         computed_file_id=self.id,
                         source_bucket=old_bucket,
                         source_key=old_key,
                         destination_bucket=new_bucket,
                         destination_key=new_key)
        return False
    try:
        # Step 3: delete the old object; the copy is now authoritative.
        response = S3.delete_object(Bucket=old_bucket, Key=old_key)
    except:
        logger.exception("Could not delete computed file after it was copied and saved!!!",
                         computed_file_id=self.id,
                         source_bucket=old_bucket,
                         source_key=old_key,
                         destination_bucket=new_bucket,
                         destination_key=new_key)
        return False
    return True
def calculate_sha1(self) -> None:
    """Compute the SHA1 digest of this file on disk, cache it on the model,
    and return it."""
    digest = calculate_sha1(self.absolute_file_path)
    self.sha1 = digest
    return digest
def calculate_size(self) -> None:
    """Measure this file's size in bytes, cache it on the model, and
    return it."""
    byte_count = calculate_file_size(self.absolute_file_path)
    self.size_in_bytes = byte_count
    return byte_count
def delete_local_file(self, force=False):
    """Best-effort removal of this file from the local filesystem.

    Outside the cloud this is a no-op unless `force` is set. A missing
    file (OSError) or unset path (TypeError) is silently ignored.
    """
    if not settings.RUNNING_IN_CLOUD and not force:
        return
    try:
        os.remove(self.absolute_file_path)
    except (OSError, TypeError):
        # Nothing to delete, or absolute_file_path was never set.
        pass
    except Exception as e:
        logger.exception("Unexpected delete file exception.",
                         absolute_file_path=self.absolute_file_path
                         )
def delete_s3_file(self, force=False):
    """Delete this file's S3 object and clear its recorded location.

    Outside the cloud this is a no-op (returns False) unless `force` is
    True. Returns True when the delete and model save succeed.
    """
    # If we're not running in the cloud then we shouldn't try to
    # delete something from S3 unless force is set.
    if not settings.RUNNING_IN_CLOUD and not force:
        return False

    try:
        S3.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
    except Exception:
        # BUG FIX: this was a bare `except:`, which also swallows
        # KeyboardInterrupt and SystemExit; catch only real errors.
        logger.exception("Failed to delete S3 object for Computed File.",
                         computed_file=self.id,
                         s3_object=self.s3_key)
        return False

    # Only clear the recorded location once S3 confirms the delete.
    self.s3_key = None
    self.s3_bucket = None
    self.save()
    return True
def get_synced_file_path(self, force=False, path=None):
    """Return an absolute local path for this ComputedFile, syncing it
    down from S3 first when it is not already present locally."""
    target = path if path else self.absolute_file_path
    if os.path.exists(target):
        return target
    # Not on disk yet -- fetch from S3, preserving the caller's choice
    # of destination.
    if path:
        return self.sync_from_s3(force, path)
    return self.sync_from_s3(force)
@property
def s3_url(self):
    """ Render the resulting HTTPS URL for the S3 object."""
    # Property wrapper around get_s3_url() for serializer/template access.
    return self.get_s3_url()
def get_s3_url(self):
    """Public HTTPS URL of the S3 object, or None when no S3 location
    has been recorded."""
    if not (self.s3_key and self.s3_bucket):
        return None
    return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
@property
def download_url(self):
    """ A temporary URL from which the file can be downloaded. """
    # Property wrapper around create_download_url() for serializer access.
    return self.create_download_url()
def create_download_url(self):
    """Create a temporary presigned S3 URL for this file (7-day expiry).

    Returns None outside the cloud or when no S3 location is recorded.
    """
    if not (settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key):
        return None
    seven_days_in_seconds = 60 * 60 * 7 * 24
    return S3.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': self.s3_bucket, 'Key': self.s3_key},
        ExpiresIn=seven_days_in_seconds
    )
class Dataset(models.Model):
    """ A Dataset is a desired set of experiments/samples to smash and download """

    AGGREGATE_CHOICES = (
        ('ALL', 'All'),
        ('EXPERIMENT', 'Experiment'),
        ('SPECIES', 'Species')
    )

    SCALE_CHOICES = (
        ('NONE', 'None'),
        ('MINMAX', 'Minmax'),
        ('STANDARD', 'Standard'),
        ('ROBUST', 'Robust'),
    )

    SVD_ALGORITHM_CHOICES = (
        ('NONE', 'None'),
        ('RANDOMIZED', 'randomized'),
        ('ARPACK', 'arpack'),
    )

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Experiments and samples live here: {'E-ABC-1': ['SAMP1', 'SAMP2']}
    # This isn't going to be queryable, so we can use JSON-in-text, just make
    # sure we validate properly in and out!
    data = JSONField(default=dict, help_text="This is a dictionary where the keys are experiment accession codes and the values are lists with sample accession codes. Eg: `{'E-ABC-1': ['SAMP1', 'SAMP2']}`")

    # Processing properties
    aggregate_by = models.CharField(max_length=255, choices=AGGREGATE_CHOICES, default="EXPERIMENT", help_text="Specifies how samples are [aggregated](http://docs.refine.bio/en/latest/main_text.html#aggregations).")
    scale_by = models.CharField(max_length=255, choices=SCALE_CHOICES, default="NONE", help_text="Specifies options for [transformations](http://docs.refine.bio/en/latest/main_text.html#transformations).")
    quantile_normalize = models.BooleanField(
        default=True,
        help_text="Part of the advanced options. Allows [skipping quantile normalization](http://docs.refine.bio/en/latest/faq.html#what-does-it-mean-to-skip-quantile-normalization-for-rna-seq-samples) for RNA-Seq samples."
    )
    quant_sf_only = models.BooleanField(default=False, help_text="Include only quant.sf files in the generated dataset.")
    svd_algorithm = models.CharField(max_length=255, choices=SVD_ALGORITHM_CHOICES, default="NONE", help_text="Specifies choice of SVD algorithm")

    # State properties
    is_processing = models.BooleanField(default=False)  # Data is still editable when False
    is_processed = models.BooleanField(default=False)  # Result has been made
    is_available = models.BooleanField(default=False)  # Result is ready for delivery

    processor_jobs = models.ManyToManyField('data_refinery_common.ProcessorJob', through='ProcessorJobDataSetAssociation')

    # Fail handling
    success = models.NullBooleanField(null=True)
    failure_reason = models.TextField()

    # Delivery properties
    email_address = models.CharField(max_length=255, blank=True, null=True)
    email_ccdl_ok = models.BooleanField(default=False)
    email_sent = models.BooleanField(default=False)  # Result has been made
    expires_on = models.DateTimeField(blank=True, null=True)

    # Deliverables
    s3_bucket = models.CharField(max_length=255)
    s3_key = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField(blank=True, null=True, default=0, help_text="Contains the size in bytes of the processed dataset.")
    sha1 = models.CharField(max_length=64, null=True, default='')

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set `created_at` on first save and bump `last_modified`."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(Dataset, self).save(*args, **kwargs)

    def get_samples(self):
        """ Returns all of the Sample objects in this Dataset """
        all_samples = []
        for sample_list in self.data.values():
            all_samples = all_samples + sample_list
        # De-duplicate accessions that appear under multiple experiments.
        all_samples = list(set(all_samples))

        return Sample.objects.filter(accession_code__in=all_samples)

    def get_experiments(self):
        """ Returns all of the Experiments objects in this Dataset """
        all_experiments = self.data.keys()
        return Experiment.objects.filter(accession_code__in=all_experiments)

    def get_samples_by_experiment(self):
        """ Returns a dict of sample QuerySets, for samples grouped by experiment. """
        all_samples = {}

        for experiment, samples in self.data.items():
            all_samples[experiment] = Sample.objects.filter(accession_code__in=samples)

        return all_samples

    def get_samples_by_species(self):
        """Returns a dict mapping organism name to a list of Sample objects."""
        by_species = {}
        all_samples = self.get_samples()
        for sample in all_samples:
            if not by_species.get(sample.organism.name, None):
                by_species[sample.organism.name] = [sample]
            else:
                by_species[sample.organism.name].append(sample)

        return by_species

    def get_aggregated_samples(self):
        """ Uses aggregate_by to return a smasher-ready sample dict. """
        if self.aggregate_by == "ALL":
            return {'ALL': self.get_samples()}
        elif self.aggregate_by == "EXPERIMENT":
            return self.get_samples_by_experiment()
        else:
            return self.get_samples_by_species()

    def is_cross_technology(self):
        """ Determine if this involves both Microarray + RNASeq"""
        if len(self.get_samples().values('technology').distinct()) > 1:
            return True
        else:
            return False

    @property
    def download_url(self):
        """ A temporary URL from which the file can be downloaded. """
        return self.create_download_url()

    def create_download_url(self):
        """ Create a temporary URL from which the file can be downloaded."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod='get_object',
                Params={
                    'Bucket': self.s3_bucket,
                    'Key': self.s3_key
                },
                ExpiresIn=(60 * 60 * 7 * 24)  # 7 days in seconds.
            )
        else:
            return None

    def s3_url(self):
        """ Render the resulting S3 URL """
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def has_email(self):
        """ Returns if the email is set or not """
        return bool(self.email_address)
class APIToken(models.Model):
    """ Required for starting a smash job """

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Activation state (defaults to not activated).
    is_activated = models.BooleanField(default=False)

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set `created_at` on first save and bump `last_modified`."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(APIToken, self).save(*args, **kwargs)

    @property
    def terms_and_conditions(self):
        """The terms and conditions text configured in Django settings."""
        return settings.TERMS_AND_CONDITIONS
"""
# Associations
These represent the relationships between items in the other tables.
"""
class ExperimentSampleAssociation(models.Model):
    """Many-to-many link between an Experiment and its Samples."""
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_sample_associations"
        unique_together = ('experiment', 'sample')
class ExperimentOrganismAssociation(models.Model):
    """Many-to-many link between an Experiment and its Organisms."""
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_organism_associations"
        unique_together = ('experiment', 'organism')
class DownloaderJobOriginalFileAssociation(models.Model):
    """Many-to-many link between a DownloaderJob and the OriginalFiles it downloads."""
    downloader_job = models.ForeignKey(
        "data_refinery_common.DownloaderJob", blank=False, null=False, on_delete=models.CASCADE)
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "downloaderjob_originalfile_associations"
        unique_together = ('downloader_job', 'original_file')
class ProcessorJobOriginalFileAssociation(models.Model):
    """Many-to-many link between a ProcessorJob and the OriginalFiles it processes."""
    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE)
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_originalfile_associations"
        unique_together = ('processor_job', 'original_file')
class ProcessorJobDatasetAssociation(models.Model):
    """Link between a ProcessorJob and the Dataset it operates on.

    Note: no unique_together constraint is declared here, unlike the
    other association tables.
    """
    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE)
    dataset = models.ForeignKey(Dataset, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_dataset_associations"
class OriginalFileSampleAssociation(models.Model):
    """Many-to-many link between an OriginalFile and its Samples."""
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "original_file_sample_associations"
        unique_together = ('original_file', 'sample')
class SampleResultAssociation(models.Model):
    """Many-to-many link between a Sample and a ComputationalResult."""
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "sample_result_associations"
        unique_together = ('result', 'sample')
class SampleComputedFileAssociation(models.Model):
    """Many-to-many link between a Sample and a ComputedFile."""
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    computed_file = models.ForeignKey(
        ComputedFile, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "sample_computed_file_associations"
        unique_together = ('sample', 'computed_file')
class ExperimentResultAssociation(models.Model):
    """Many-to-many link between an Experiment and a ComputationalResult."""
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_result_associations"
        unique_together = ('result', 'experiment')
<|code_end|>
workers/data_refinery_workers/processors/management/commands/create_qn_target.py
<|code_start|>import requests
import sys
from django.core.management.base import BaseCommand
from django.db.models import Count
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Dataset,
Experiment,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SampleComputedFileAssociation,
)
from data_refinery_workers.processors import qn_reference, utils
logger = get_and_configure_logger(__name__)
class Command(BaseCommand):
    """Create QN reference targets: for one organism, for all organisms
    with enough processed samples, or by resuming an existing job."""

    def add_arguments(self, parser):
        parser.add_argument(
            "--organism",
            type=str,
            help=("Name of organism"))
        parser.add_argument(
            "--all",
            default=False,
            action='store_true',
            help=("Perform for all organisms that meet <min> processed samples"))
        parser.add_argument(
            "--min",
            type=int,
            default=100,
            help=("Minimum number of processed samples"))
        parser.add_argument(
            "--platform",
            type=str,
            default=None,
            help=("Name of platform")
        )
        parser.add_argument(
            "--job_id",
            type=int,
            default=None,
            help=("ID of job to run.")
        )

    def handle(self, *args, **options):
        """Create QN reference jobs, or resume one when --job_id is given."""
        if options["job_id"]:
            # Resume/run an already-created job directly.
            qn_reference.create_qn_reference(options["job_id"])
            return

        if options["organism"] is None and not options["all"]:
            logger.error("You must specify an organism or --all")
            sys.exit(1)

        if options["organism"] and (options.get('organism', '') != "ALL"):
            organisms = [Organism.get_object_for_name(options["organism"].upper())]
        else:
            organisms = Organism.objects.all()

        for organism in organisms:
            # BUG FIX: the --min option was logged below but never passed
            # through, so the default threshold (100) was always used.
            if not organism_can_have_qn_target(organism, options["min"]):
                logger.error("Organism does not have any platform with enough samples to generate a qn target",
                             organism=organism,
                             min=options["min"])
                continue

            samples = organism.sample_set.filter(has_raw=True, technology="MICROARRAY", is_processed=True)
            if samples.count() == 0:
                logger.error("No processed samples for organism.",
                             organism=organism,
                             count=samples.count()
                             )
                continue

            if options["platform"] is None:
                # Default to the platform with the most processed samples.
                platform_counts = samples.values('platform_accession_code').annotate(dcount=Count('platform_accession_code')).order_by('-dcount')
                biggest_platform = platform_counts[0]['platform_accession_code']
            else:
                biggest_platform = options["platform"]

            sample_codes_results = Sample.processed_objects.filter(
                platform_accession_code=biggest_platform,
                has_raw=True,
                technology="MICROARRAY",
                organism=organism,
                is_processed=True).values('accession_code')
            sample_codes = [res['accession_code'] for res in sample_codes_results]

            # QN references are built over a single aggregate of all the
            # chosen platform's samples, without scaling or normalization.
            dataset = Dataset()
            dataset.data = {organism.name + '_(' + biggest_platform + ')': sample_codes}
            dataset.aggregate_by = "ALL"
            dataset.scale_by = "NONE"
            dataset.quantile_normalize = False
            dataset.save()

            job = ProcessorJob()
            job.pipeline_applied = "QN_REFERENCE"
            job.save()

            pjda = ProcessorJobDatasetAssociation()
            pjda.processor_job = job
            pjda.dataset = dataset
            pjda.save()

            # Run the QN reference pipeline synchronously for this job.
            final_context = qn_reference.create_qn_reference(job.pk)

            if final_context['success']:
                print(":D")
                self.stdout.write("Target file: " + final_context['target_file'])
                self.stdout.write("Target S3: " + str(final_context['computed_files'][0].get_s3_url()))
            else:
                print(":(")
def organism_can_have_qn_target(organism: Organism, sample_threshold=100):
    """True when at least one microarray platform for this organism has
    more than `sample_threshold` processed samples with raw data."""
    processed_microarray_samples = organism.sample_set.filter(
        has_raw=True, technology="MICROARRAY", is_processed=True)
    platforms_over_threshold = (processed_microarray_samples
                                .values('platform_accession_code')
                                .annotate(count=Count('id'))
                                .filter(count__gt=sample_threshold))
    return platforms_over_threshold.exists()
<|code_end|>
workers/data_refinery_workers/processors/management/commands/remove_invalid_computational_results.py
<|code_start|>
from django.core.management.base import BaseCommand
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputationalResultAnnotation,
ComputedFile, Organism)
from .create_qn_target import organism_can_have_qn_target
logger = get_and_configure_logger(__name__)
class Command(BaseCommand):
    """Remove ComputationalResults that are no longer valid: QN targets
    and compendia for organisms that can no longer have a QN target."""

    def add_arguments(self, parser):
        parser.add_argument('--dry-run',
                            default=False,
                            action='store_true',
                            help='Prints resulting actions without actually running them.')
        parser.add_argument("--min",
                            type=int,
                            default=100,
                            help=("Minimum number of processed microarray samples for each organism"))
        parser.add_argument("--qn-target",
                            default=False,
                            action='store_true',
                            help=("Remove invalid QN targets"))
        parser.add_argument("--compendias",
                            default=False,
                            action='store_true',
                            help=("Remove invalid Compendias"))

    def handle(self, *args, **options):
        """ Takes care of removing invalid computational results. """
        computational_result_ids = []

        if options['compendias']:
            computational_result_ids += remove_invalid_compendias(options['min'])
        if options['qn_target']:
            computational_result_ids += remove_invalid_qn_targets(options['min'])

        if not computational_result_ids:
            # BUG FIX: the message previously said `--compendia`, but the
            # flag declared above is `--compendias`.
            logger.info('Nothing removed. Use options --compendias or --qn-target to select which computational results to check.')

        logger.info("Removing computational results with ids %s", str(computational_result_ids))

        if not options['dry_run']:
            # delete all invalid compendias from S3
            compendias = ComputationalResult.objects.filter(id__in=computational_result_ids)
            for computational_result in compendias:
                computational_result.remove_computed_files_from_s3()
            # delete all compendias
            compendias.delete()
def remove_invalid_qn_targets(min_samples):
    """Return the ids of QN-target ComputationalResults belonging to
    organisms that no longer have enough processed microarray samples."""
    qn_target_ids = []
    # BUG FIX: this was `Organism.object.filter(...)`, which raises
    # AttributeError -- the default model manager is `Organism.objects`.
    for organism in Organism.objects.filter(qn_target__isnull=False):
        if not organism_can_have_qn_target(organism, min_samples):
            # Remove all qn targets associated with this organism.
            qn_target_ids += ComputationalResultAnnotation.objects\
                                .filter(data__is_qn=True, data__organism_id=organism.id)\
                                .values_list('result__id', flat=True)
            logger.debug('Remove all QN targets for organism', organism=organism)
    return qn_target_ids
def remove_invalid_compendias(min_samples):
    """Return the ids of compendia ComputationalResults whose organism can
    no longer have a QN target."""
    invalid_result_ids = []
    for organism in _get_organisms_with_compendias():
        if organism_can_have_qn_target(organism, min_samples):
            continue
        # Remove all compendias that are associated with organisms that
        # can't have QN targets.
        compendia_result_ids = ComputedFile.objects\
            .filter(is_compendia=True, compendia_organism=organism)\
            .values_list('result__id', flat=True)
        invalid_result_ids += compendia_result_ids
        logger.debug('Remove all Compendias for organism', organism=organism)
    return invalid_result_ids
def _get_organisms_with_compendias():
    """Return the Organisms that currently have at least one compendium
    ComputedFile."""
    compendia_files = ComputedFile.objects.filter(is_compendia=True)
    organism_ids = compendia_files\
        .values_list('compendia_organism__id', flat=True)\
        .distinct()
    return Organism.objects.filter(id__in=organism_ids)
<|code_end|>
|
Quantpendia doesn't seem to run
### Context
Just triggered the quantpendia job for Danio
### Problem or idea
It doesn't seem to have started:
```
ubuntu@ip-10-0-80-121:~$ ./run_management_command.sh create_compendia --organisms=DANIO_RERIO --quant-sf-only=True --svd-algorithm=None
fd6293be1671e7c5ebf63a96473ddac6d1d084a58dfecb85262487f31198aec3
ubuntu@ip-10-0-80-121:~$ docker logs fd6293be1671e7c5ebf63a96473ddac6d1d084a58dfecb85262487f31198aec3
2019-10-21 16:53:29,668 i-09b7a30710d2781fa [volume: -1] data_refinery_foreman.foreman.management.commands.create_compendia INFO [job_id: 29291464] [organism: DANIO_RERIO]: Sending CREATE_COMPENDIA for Organism
```
That job doesn't have a `start_date` on the db.
```
id | pipeline_applied | no_retry | ram_amount | volume_index | start_time | end_time | success | nomad_job_id | num_retries | retried | worker_id | worker_version | failure_reason | created_at | last_modified | retried_job_id
----------+--------------------+----------+------------+--------------+------------+----------+---------+-----------------------------------------------+-------------+---------+-----------+----------------+----------------+-------------------------------+-------------------------------+----------------
29291464 | CREATE_QUANTPENDIA | f | 2048 | | | | | CREATE_COMPENDIA/dispatch-1571676809-09e79eb6 | 0 | f | | | | 2019-10-21 16:53:29.607529+00 | 2019-10-21 16:53:29.686031+00 |
(1 row)
```
And there seems to be an error on Nomad when creating the job:
```
ubuntu@ip-10-0-242-7:~$ nomad status CREATE_COMPENDIA/dispatch-1571676809-09e79eb6
ID = CREATE_COMPENDIA/dispatch-1571676809-09e79eb6
Name = CREATE_COMPENDIA/dispatch-1571676809-09e79eb6
Submit Date = 2019-10-21T16:53:29Z
Type = batch
Priority = 80
Datacenters = dc1
Status = pending
Periodic = false
Parameterized = false
Summary
Task Group Queued Starting Running Failed Complete Lost
jobs 1 0 0 0 0 0
Placement Failure
Task Group "jobs":
* Constraint "${meta.is_smasher} = true" filtered 1 nodes
* Resources exhausted on 1 nodes
* Dimension "memory" exhausted on 1 nodes
Allocations
No allocations placed
```
### Solution or next step
We need to investigate what happened.
| common/data_refinery_common/message_queue.py
<|code_start|>"""Provides an interface to send messages to the Message Queue."""
from __future__ import absolute_import, unicode_literals
import nomad
from django.conf import settings
from enum import Enum
from nomad.api.exceptions import URLNotFoundNomadException
from data_refinery_common.utils import get_env_variable, get_env_variable_gracefully, get_volume_index
from data_refinery_common.models import ProcessorJob, SurveyJob, DownloaderJob
from data_refinery_common.job_lookup import ProcessorPipeline, Downloaders, SurveyJobTypes, SMASHER_JOB_TYPES
from data_refinery_common.logging import get_and_configure_logger
logger = get_and_configure_logger(__name__)
# These two constants refer to image names that should be used for
# multiple jobs.
NOMAD_TRANSCRIPTOME_JOB = "TRANSCRIPTOME_INDEX"
NOMAD_DOWNLOADER_JOB = "DOWNLOADER"
NONE_JOB_ERROR_TEMPLATE = "send_job was called with NONE job_type: {} for {} job {}"
def send_job(job_type: Enum, job, is_dispatch=False) -> bool:
    """Queues a worker job by sending a Nomad Job dispatch message.

    job_type must be a valid Enum for ProcessorPipelines or
    Downloaders as defined in data_refinery_common.job_lookup.
    job must be an existing ProcessorJob or DownloaderJob record.

    Returns True if the job was successfully dispatched (or deliberately
    deferred to the Foreman); raises on dispatch failure.
    """
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)

    is_processor = True
    # BUG FIX: the NONE checks must come before the generic membership
    # checks, because Downloaders.NONE is itself a member of
    # list(Downloaders) and its branch was unreachable; both NONE branches
    # also referenced an undefined `job_id` name (now job.id).
    if job_type is Downloaders.NONE:
        logger.warn("Not queuing %s job.", job_type, job_id=job.id)
        raise ValueError(NONE_JOB_ERROR_TEMPLATE.format(job_type.value, "Downloader", job.id))
    elif job_type is ProcessorPipeline.NONE:
        logger.warn("Not queuing %s job.", job_type, job_id=job.id)
        raise ValueError(NONE_JOB_ERROR_TEMPLATE.format(job_type.value, "Processor", job.id))
    elif job_type is ProcessorPipeline.TRANSCRIPTOME_INDEX_LONG \
            or job_type is ProcessorPipeline.TRANSCRIPTOME_INDEX_SHORT:
        nomad_job = NOMAD_TRANSCRIPTOME_JOB
    elif job_type is ProcessorPipeline.SALMON or job_type is ProcessorPipeline.TXIMPORT:
        # Tximport uses the same job specification as Salmon.
        nomad_job = ProcessorPipeline.SALMON.value
    elif job_type is ProcessorPipeline.AFFY_TO_PCL \
            or job_type is ProcessorPipeline.AGILENT_TWOCOLOR_TO_PCL:
        # Agilent twocolor uses the same job specification as Affy.
        nomad_job = ProcessorPipeline.AFFY_TO_PCL.value
    elif job_type is ProcessorPipeline.NO_OP:
        nomad_job = ProcessorPipeline.NO_OP.value
    elif job_type is ProcessorPipeline.ILLUMINA_TO_PCL:
        nomad_job = ProcessorPipeline.ILLUMINA_TO_PCL.value
    elif job_type is ProcessorPipeline.SMASHER:
        nomad_job = ProcessorPipeline.SMASHER.value
    elif job_type is ProcessorPipeline.JANITOR:
        nomad_job = ProcessorPipeline.JANITOR.value
    elif job_type is ProcessorPipeline.QN_REFERENCE:
        nomad_job = ProcessorPipeline.QN_REFERENCE.value
    elif job_type is ProcessorPipeline.CREATE_COMPENDIA \
            or job_type is ProcessorPipeline.CREATE_QUANTPENDIA:
        # BUG FIX: CREATE_QUANTPENDIA was previously unhandled and fell
        # through to the "Invalid job_type" ValueError, so quantpendia jobs
        # could never be dispatched under their own pipeline. Quantpendia
        # shares the compendia Nomad job specification; the JOB_NAME meta
        # below still carries the real pipeline for the worker.
        nomad_job = ProcessorPipeline.CREATE_COMPENDIA.value
    elif job_type in list(Downloaders):
        nomad_job = NOMAD_DOWNLOADER_JOB
        is_processor = False
    elif job_type in list(SurveyJobTypes):
        nomad_job = job_type.value
        is_processor = False
    else:
        raise ValueError("Invalid job_type: {}".format(job_type.value))

    logger.debug("Queuing %s nomad job to run job %s with id %d.",
                 nomad_job,
                 job_type.value,
                 job.id)

    # We only want to dispatch processor jobs directly.
    # Everything else will be handled by the Foreman, which will increment the retry counter.
    if is_processor or is_dispatch or (not settings.RUNNING_IN_CLOUD):
        # Smasher doesn't need to be on a specific instance since it will
        # download all the data to its instance anyway.
        if isinstance(job, ProcessorJob) and job_type not in SMASHER_JOB_TYPES:
            # Make sure this job goes to the correct EBS resource.
            # If this is being dispatched for the first time, make sure that
            # we store the currently attached index.
            # If this is being dispatched by the Foreman, it should already
            # have an attached volume index, so use that.
            if job.volume_index is None:
                job.volume_index = get_volume_index()
                job.save()
            nomad_job = nomad_job + "_" + job.volume_index + "_" + str(job.ram_amount)
        elif isinstance(job, SurveyJob):
            nomad_job = nomad_job + "_" + str(job.ram_amount)
        elif isinstance(job, DownloaderJob):
            volume_index = job.volume_index if settings.RUNNING_IN_CLOUD else "0"
            nomad_job = nomad_job + "_" + volume_index + "_" + str(job.ram_amount)

        try:
            nomad_response = nomad_client.job.dispatch_job(nomad_job, meta={"JOB_NAME": job_type.value,
                                                                            "JOB_ID": str(job.id)})
            # Record the Nomad dispatch id so the job can be found later.
            job.nomad_job_id = nomad_response["DispatchedJobID"]
            job.save()
            return True
        except URLNotFoundNomadException:
            logger.info("Dispatching Nomad job of type %s for job spec %s to host %s and port %s failed.",
                        job_type, nomad_job, nomad_host, nomad_port, job=str(job.id))
            raise
        except Exception as e:
            logger.info('Unable to Dispatch Nomad Job.',
                        job_name=job_type.value,
                        job_id=str(job.id),
                        reason=str(e))
            raise
    else:
        # Leave the job for the Foreman to dispatch; undo the retry bump.
        job.num_retries = job.num_retries - 1
        job.save()
        return True
<|code_end|>
foreman/data_refinery_foreman/foreman/management/commands/create_compendia.py
<|code_start|>import sys
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
from data_refinery_common.utils import queryset_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism, quant_sf_only=False, svd_algorithm='ARPACK'):
    """Returns a compendia (or quantpendia) processor job for the provided organism.

    Fetch all of the experiments and compile large but normally formated Dataset.

    BUG FIX: the signature previously read `organism=Organism`, which set
    the Organism *class* as a default value instead of annotating the
    parameter's type.
    """
    data = {}
    experiments = Experiment.objects.filter(organisms=organism).prefetch_related('samples')

    for experiment in queryset_iterator(experiments):
        data[experiment.accession_code] = list(experiment.samples.filter(organism=organism)
                                               .values_list('accession_code', flat=True))

    job = ProcessorJob()
    if quant_sf_only:
        job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value
    else:
        job.pipeline_applied = ProcessorPipeline.CREATE_COMPENDIA.value
    job.save()

    dset = Dataset()
    dset.data = data
    dset.scale_by = 'NONE'
    # Quantpendia are aggregated by experiment, compendia by species.
    dset.aggregate_by = 'EXPERIMENT' if quant_sf_only else 'SPECIES'
    dset.quantile_normalize = False
    dset.quant_sf_only = quant_sf_only
    dset.svd_algorithm = svd_algorithm
    dset.save()

    pjda = ProcessorJobDatasetAssociation()
    pjda.processor_job = job
    pjda.dataset = dset
    pjda.save()

    return job
class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))
        parser.add_argument(
            "--quant-sf-only",
            type=lambda x: x == "True",
            help=("Whether to create a quantpendium or normal compendium. Quantpendium will be aggregated by EXPERIMENT"))
        parser.add_argument(
            "--svd-algorithm",
            type=str,
            help=("Specify SVD algorithm applied during imputation ARPACK, RANDOMIZED or NONE to skip."))

    def handle(self, *args, **options):
        """Create a compendium for one or more organisms.

        If --organisms is supplied, a job is created for each listed
        organism; otherwise one is created for every organism except
        human and mouse.
        """
        if options["organisms"] is None:
            all_organisms = Organism.objects.exclude(name__in=["HOMO_SAPIENS", "MUS_MUSCULUS"])
        else:
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = Organism.objects.filter(name__in=organisms)

        # I think we could just use options["quant_sf_only"] but I
        # wanna make sure that values that are not True do not trigger
        # a truthy evaluation.
        quant_sf_only = options["quant_sf_only"] is True

        # default algorithm to arpack until we decide that randomized is preferred
        svd_algorithm = 'NONE' if quant_sf_only else 'ARPACK'
        if options["svd_algorithm"] in ['ARPACK', 'RANDOMIZED', 'NONE']:
            svd_algorithm = options["svd_algorithm"]

        logger.debug('Generating compendia for organisms', organisms=all_organisms)

        for organism in all_organisms:
            job = create_job_for_organism(organism, quant_sf_only, svd_algorithm)
            logger.info("Sending CREATE_COMPENDIA for Organism", job_id=str(job.pk), organism=str(organism))
            # NOTE(review): quantpendia jobs (pipeline_applied ==
            # CREATE_QUANTPENDIA) are dispatched with the CREATE_COMPENDIA
            # job type here -- confirm send_job handles that pipeline,
            # otherwise these jobs never start.
            send_job(ProcessorPipeline.CREATE_COMPENDIA, job)

        sys.exit(0)
<|code_end|>
workers/data_refinery_workers/processors/create_quantpendia.py
<|code_start|>import os
import shutil
import time
from django.utils import timezone
from typing import Dict, List, Tuple
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputedFile,
Organism,
Pipeline,
Sample)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
def create_quantpendia(job_id: int) -> None:
    """Run the quantpendia pipeline for the given processor job id and
    return the final job context."""
    pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)
    pipeline_steps = [
        utils.start_job,
        download_files,
        create_result_objects,
        utils.end_job,
    ]
    initial_context = {"job_id": job_id, "pipeline": pipeline}
    return utils.run_pipeline(initial_context, pipeline_steps)
def download_files(job_context: Dict) -> Dict:
    """Sync every sample's quant.sf file into the dataset output folders.

    Records time_start/time_end and the number of synced files on the
    job context, plus the command name used for provenance.
    """
    job_context['time_start'] = timezone.now()
    _make_dirs(job_context)

    downloaded = 0
    for accession, samples in job_context['samples'].items():
        sample_dir = job_context['output_dir'] + accession + '/'
        os.makedirs(sample_dir, exist_ok=True)
        # quant.sf files land directly inside the dataset folder
        downloaded += smashing_utils.sync_quant_files(sample_dir, samples)
    job_context['num_samples'] = downloaded

    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = "create_quantpendia.py"
    return job_context
def create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Zips the output directory, records the archive as a compendia
    ComputedFile, uploads it to S3, and marks the context successful.
    """
    result = ComputationalResult()
    # NOTE(review): 'formatted_command' is a plain string, so " ".join()
    # space-separates its characters rather than joining words — confirm
    # this is the intended stored command text.
    result.commands.append(" ".join(job_context['formatted_command']))
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_QUANTPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        # Delegate failure bookkeeping to the shared handler.
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()

    # Create the resulting archive
    smashing_utils.write_non_data_files(job_context)
    final_zip_base = SMASHING_DIR + str(job_context["dataset"].pk) + "_compendia"
    shutil.copy("/home/user/README_QUANT.md", job_context["output_dir"] + "/README.md")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])

    compendia_organism = _get_organisms(job_context['samples']).first()
    compendia_version = _get_next_compendia_version(compendia_organism)

    # Register the zip as a quant-sf-only compendia file for this organism.
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.is_compendia = True
    archive_computed_file.quant_sf_only = True
    archive_computed_file.compendia_organism = compendia_organism
    archive_computed_file.compendia_version = compendia_version
    archive_computed_file.save()

    logger.info("Quantpendia created!",
                archive_path=archive_path,
                organism_name=compendia_organism.name)

    # Upload the result to S3; the timestamp keeps keys unique per run.
    timestamp = str(int(time.time()))
    s3_key = compendia_organism.name + "_" + str(compendia_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)

    job_context['result'] = result
    job_context['computed_files'] = [archive_computed_file]
    job_context['success'] = True
    return job_context
def _make_dirs(job_context: Dict):
    """Create the per-dataset work directory and its output/ subdirectory.

    Uses ``exist_ok=True`` so a retried job does not crash when a
    previous attempt already created the directories (the later
    versions of this processor do the same).
    """
    dataset_id = str(job_context["dataset"].pk)
    job_context["work_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    os.makedirs(job_context["work_dir"], exist_ok=True)
    job_context["output_dir"] = job_context["work_dir"] + "output/"
    os.makedirs(job_context["output_dir"], exist_ok=True)
def _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:
    """Return the distinct Organisms referenced across all sample querysets."""
    organism_ids = set()
    for samples in aggregated_samples.values():
        organism_ids.update(samples.values_list('organism__id', flat=True).distinct())
    return Organism.objects.filter(id__in=list(organism_ids))
def _get_next_compendia_version(organism: Organism) -> int:
    """Return the version number for the next compendium of `organism`.

    Starts at 1 when no compendium exists yet; otherwise one more than
    the highest existing version.
    """
    # Bug fix: order descending so .first() is the LATEST version.
    # Ascending order returned the lowest version, so the "next" version
    # could collide with an existing one.
    last_compendia = ComputedFile.objects\
        .filter(is_compendia=True, compendia_organism=organism)\
        .order_by('-compendia_version').first()
    if last_compendia:
        return last_compendia.compendia_version + 1
    # otherwise this is the first compendia that we are generating
    return 1
<|code_end|>
| common/data_refinery_common/message_queue.py
<|code_start|>"""Provides an interface to send messages to the Message Queue."""
from __future__ import absolute_import, unicode_literals
import nomad
from django.conf import settings
from enum import Enum
from nomad.api.exceptions import URLNotFoundNomadException
from data_refinery_common.utils import get_env_variable, get_env_variable_gracefully, get_volume_index
from data_refinery_common.models import ProcessorJob, SurveyJob, DownloaderJob
from data_refinery_common.job_lookup import ProcessorPipeline, Downloaders, SurveyJobTypes, SMASHER_JOB_TYPES
from data_refinery_common.logging import get_and_configure_logger
logger = get_and_configure_logger(__name__)
# These two constants refer to image names that should be used for
# multiple jobs.
NOMAD_TRANSCRIPTOME_JOB = "TRANSCRIPTOME_INDEX"
NOMAD_DOWNLOADER_JOB = "DOWNLOADER"
NONE_JOB_ERROR_TEMPLATE = "send_job was called with NONE job_type: {} for {} job {}"
def send_job(job_type: Enum, job, is_dispatch=False) -> bool:
    """Queues a worker job by sending a Nomad Job dispatch message.

    job_type must be a valid Enum for ProcessorPipelines or
    Downloaders as defined in data_refinery_common.job_lookup.
    job must be an existing ProcessorJob, DownloaderJob, or SurveyJob record.

    Returns True when the job was dispatched (or deferred to the Foreman);
    raises ValueError for NONE/invalid job types and re-raises dispatch errors.
    """
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)

    is_processor = True
    # Bug fix: the NONE checks must come before the generic membership
    # checks — Downloaders.NONE is a member of list(Downloaders), so the
    # old ordering made these branches unreachable. They also referenced
    # an undefined name `job_id` (now `job.id`), which raised NameError
    # instead of the intended ValueError.
    if job_type is Downloaders.NONE:
        logger.warn("Not queuing %s job.", job_type, job_id=job.id)
        raise ValueError(NONE_JOB_ERROR_TEMPLATE.format(job_type.value, "Downloader", job.id))
    elif job_type is ProcessorPipeline.NONE:
        logger.warn("Not queuing %s job.", job_type, job_id=job.id)
        raise ValueError(NONE_JOB_ERROR_TEMPLATE.format(job_type.value, "Processor", job.id))
    elif job_type is ProcessorPipeline.TRANSCRIPTOME_INDEX_LONG \
            or job_type is ProcessorPipeline.TRANSCRIPTOME_INDEX_SHORT:
        nomad_job = NOMAD_TRANSCRIPTOME_JOB
    elif job_type is ProcessorPipeline.SALMON or job_type is ProcessorPipeline.TXIMPORT:
        # Tximport uses the same job specification as Salmon.
        nomad_job = ProcessorPipeline.SALMON.value
    elif job_type is ProcessorPipeline.AFFY_TO_PCL:
        nomad_job = ProcessorPipeline.AFFY_TO_PCL.value
    elif job_type is ProcessorPipeline.NO_OP:
        nomad_job = ProcessorPipeline.NO_OP.value
    elif job_type is ProcessorPipeline.ILLUMINA_TO_PCL:
        nomad_job = ProcessorPipeline.ILLUMINA_TO_PCL.value
    elif job_type is ProcessorPipeline.SMASHER:
        nomad_job = ProcessorPipeline.SMASHER.value
    elif job_type is ProcessorPipeline.JANITOR:
        nomad_job = ProcessorPipeline.JANITOR.value
    elif job_type is ProcessorPipeline.QN_REFERENCE:
        nomad_job = ProcessorPipeline.QN_REFERENCE.value
    elif job_type is ProcessorPipeline.CREATE_COMPENDIA:
        nomad_job = ProcessorPipeline.CREATE_COMPENDIA.value
    elif job_type is ProcessorPipeline.CREATE_QUANTPENDIA:
        nomad_job = ProcessorPipeline.CREATE_QUANTPENDIA.value
    elif job_type is ProcessorPipeline.AGILENT_TWOCOLOR_TO_PCL:
        # Agilent twocolor uses the same job specification as Affy.
        nomad_job = ProcessorPipeline.AFFY_TO_PCL.value
    elif job_type in list(Downloaders):
        nomad_job = NOMAD_DOWNLOADER_JOB
        is_processor = False
    elif job_type in list(SurveyJobTypes):
        nomad_job = job_type.value
        is_processor = False
    else:
        raise ValueError("Invalid job_type: {}".format(job_type.value))

    logger.debug("Queuing %s nomad job to run job %s with id %d.",
                 nomad_job,
                 job_type.value,
                 job.id)

    # We only want to dispatch processor jobs directly.
    # Everything else will be handled by the Foreman, which will increment the retry counter.
    if is_processor or is_dispatch or (not settings.RUNNING_IN_CLOUD):
        # Smasher doesn't need to be on a specific instance since it will
        # download all the data to its instance anyway.
        if isinstance(job, ProcessorJob) and job_type not in SMASHER_JOB_TYPES:
            # Make sure this job goes to the correct EBS resource.
            # If this is being dispatched for the first time, make sure that
            # we store the currently attached index.
            # If this is being dispatched by the Foreman, it should already
            # have an attached volume index, so use that.
            if job.volume_index is None:
                job.volume_index = get_volume_index()
                job.save()
            nomad_job = nomad_job + "_" + job.volume_index + "_" + str(job.ram_amount)
        elif isinstance(job, SurveyJob):
            nomad_job = nomad_job + "_" + str(job.ram_amount)
        elif isinstance(job, DownloaderJob):
            volume_index = job.volume_index if settings.RUNNING_IN_CLOUD else "0"
            nomad_job = nomad_job + "_" + volume_index + "_" + str(job.ram_amount)

        try:
            nomad_response = nomad_client.job.dispatch_job(nomad_job, meta={"JOB_NAME": job_type.value,
                                                                            "JOB_ID": str(job.id)})
            # Record the dispatched Nomad job id so the job can be traced
            # back from the nomad_job_id column later.
            job.nomad_job_id = nomad_response["DispatchedJobID"]
            job.save()
            return True
        except URLNotFoundNomadException:
            logger.info("Dispatching Nomad job of type %s for job spec %s to host %s and port %s failed.",
                        job_type, nomad_job, nomad_host, nomad_port, job=str(job.id))
            raise
        except Exception as e:
            logger.info('Unable to Dispatch Nomad Job.',
                        job_name=job_type.value,
                        job_id=str(job.id),
                        reason=str(e))
            raise
    else:
        # Leave dispatching to the Foreman; restore the retry it will consume.
        job.num_retries = job.num_retries - 1
        job.save()
        return True
<|code_end|>
foreman/data_refinery_foreman/foreman/management/commands/create_compendia.py
<|code_start|>import sys
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
from data_refinery_common.utils import queryset_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism=Organism, quant_sf_only=False, svd_algorithm='ARPACK'):
    """Return a new compendia ProcessorJob for the provided organism.

    Fetches all of the organism's experiments and compiles a large but
    normally formatted Dataset linked to the job.
    """
    experiments = Experiment.objects.filter(organisms=organism).prefetch_related('samples')
    data = {
        experiment.accession_code: list(
            experiment.samples.filter(organism=organism)
                              .values_list('accession_code', flat=True))
        for experiment in queryset_iterator(experiments)
    }

    job = ProcessorJob()
    job.pipeline_applied = (ProcessorPipeline.CREATE_QUANTPENDIA.value
                            if quant_sf_only
                            else ProcessorPipeline.CREATE_COMPENDIA.value)
    job.save()

    dset = Dataset()
    dset.data = data
    dset.scale_by = 'NONE'
    dset.aggregate_by = 'EXPERIMENT' if quant_sf_only else 'SPECIES'
    dset.quantile_normalize = False
    dset.quant_sf_only = quant_sf_only
    dset.svd_algorithm = svd_algorithm
    dset.save()

    association = ProcessorJobDatasetAssociation()
    association.processor_job = job
    association.dataset = dset
    association.save()

    return job
class Command(BaseCommand):
    """Management command that queues compendia/quantpendia processor jobs."""

    def add_arguments(self, parser):
        # Note: --quant-sf-only treats only the literal string "True" as true.
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))
        parser.add_argument(
            "--quant-sf-only",
            type=lambda x: x == "True",
            help=("Whether to create a quantpendium or normal compendium. Quantpendium will be aggregated by EXPERIMENT"))
        parser.add_argument(
            "--svd-algorithm",
            type=str,
            help=("Specify SVD algorithm applied during imputation ARPACK, RANDOMIZED or NONE to skip."))

    def handle(self, *args, **options):
        """Create a compendium for one or more organisms.

        If --organisms is supplied, a job is created for each listed
        organism. Otherwise a job is created for every organism except
        human and mouse.
        """
        if options["organisms"] is None:
            all_organisms = Organism.objects.exclude(name__in=["HOMO_SAPIENS", "MUS_MUSCULUS"])
        else:
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = Organism.objects.filter(name__in=organisms)

        # Identity check so that only the parsed value True (not merely
        # truthy values) enables quantpendia mode.
        quant_sf_only = options["quant_sf_only"] is True

        # default algorithm to ARPACK until we decide that RANDOMIZED is
        # preferred; quantpendia use NONE to skip imputation.
        svd_algorithm = 'NONE' if quant_sf_only else 'ARPACK'
        if options["svd_algorithm"] in ['ARPACK', 'RANDOMIZED', 'NONE']:
            svd_algorithm = options["svd_algorithm"]

        logger.debug('Generating compendia for organisms', organisms=all_organisms)

        job_pipeline = ProcessorPipeline.CREATE_QUANTPENDIA if quant_sf_only else ProcessorPipeline.CREATE_COMPENDIA
        for organism in all_organisms:
            job = create_job_for_organism(organism, quant_sf_only, svd_algorithm)
            logger.info("Sending compendia job for Organism",
                        job_id=str(job.pk),
                        organism=str(organism),
                        quant_sf_only=quant_sf_only)
            send_job(job_pipeline, job)
workers/data_refinery_workers/processors/create_quantpendia.py
<|code_start|>import os
import shutil
import time
from django.utils import timezone
from typing import Dict, List, Tuple
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputedFile,
Organism,
Pipeline,
Sample)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
def create_quantpendia(job_id: int) -> Dict:
    """Run the quantpendia pipeline (start -> download -> result objects
    -> end) for the ProcessorJob with the given id and return the final
    job context.
    """
    pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)
    job_context = utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
                                     [utils.start_job,
                                      download_files,
                                      create_result_objects,
                                      utils.end_job])
    return job_context
def download_files(job_context: Dict) -> Dict:
    """Download every sample's quant.sf file into per-key output folders.

    Records time_start/time_end and the number of synced files on the
    job context.
    """
    job_context['time_start'] = timezone.now()
    _make_dirs(job_context)
    num_samples = 0
    for key, samples in job_context['samples'].items():
        outfile_dir = job_context['output_dir'] + key + '/'
        os.makedirs(outfile_dir, exist_ok=True)
        # download quant.sf files directly into the dataset folder
        num_samples += smashing_utils.sync_quant_files(outfile_dir, samples)
    job_context['num_samples'] = num_samples
    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = "create_quantpendia.py"
    return job_context
def create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Zips the output directory, records the archive as a compendia
    ComputedFile, uploads it to S3, and marks the context successful.
    """
    result = ComputationalResult()
    # NOTE(review): 'formatted_command' is a plain string, so " ".join()
    # space-separates its characters — confirm this is intended.
    result.commands.append(" ".join(job_context['formatted_command']))
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_QUANTPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        # Delegate failure bookkeeping to the shared handler.
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()

    # Create the resulting archive
    smashing_utils.write_non_data_files(job_context)
    final_zip_base = SMASHING_DIR + str(job_context["dataset"].pk) + "_compendia"
    shutil.copy("/home/user/README_QUANT.md", job_context["output_dir"] + "/README.md")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])

    compendia_organism = _get_organisms(job_context['samples']).first()
    compendia_version = _get_next_compendia_version(compendia_organism)

    # Register the zip as a quant-sf-only compendia file for this organism.
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.is_compendia = True
    archive_computed_file.quant_sf_only = True
    archive_computed_file.compendia_organism = compendia_organism
    archive_computed_file.compendia_version = compendia_version
    archive_computed_file.save()

    logger.info("Quantpendia created!",
                archive_path=archive_path,
                organism_name=compendia_organism.name)

    # Upload the result to S3; the timestamp keeps keys unique per run.
    timestamp = str(int(time.time()))
    s3_key = compendia_organism.name + "_" + str(compendia_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)

    job_context['result'] = result
    job_context['computed_files'] = [archive_computed_file]
    job_context['success'] = True
    return job_context
def _make_dirs(job_context: Dict):
    """Create the per-dataset work directory and its output/ subdirectory.

    Uses ``exist_ok=True`` so a retried job does not crash when a
    previous attempt already created the directories.
    """
    dataset_id = str(job_context["dataset"].pk)
    job_context["work_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    os.makedirs(job_context["work_dir"], exist_ok=True)
    job_context["output_dir"] = job_context["work_dir"] + "output/"
    os.makedirs(job_context["output_dir"], exist_ok=True)
def _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:
    """Return the distinct Organisms referenced across all sample querysets."""
    organisms = set()
    for key, samples in aggregated_samples.items():
        organism_ids = samples.values_list('organism__id', flat=True).distinct()
        organisms.update(organism_ids)
    return Organism.objects.filter(id__in=list(organisms))
def _get_next_compendia_version(organism: Organism) -> int:
    """Return the version number for the next compendium of `organism`.

    Starts at 1 when no compendium exists yet; otherwise one more than
    the highest existing version.
    """
    # Bug fix: order descending so .first() is the LATEST version.
    # Ascending order returned the lowest version, so the "next" version
    # could collide with an existing one.
    last_compendia = ComputedFile.objects\
        .filter(is_compendia=True, compendia_organism=organism)\
        .order_by('-compendia_version').first()
    if last_compendia:
        return last_compendia.compendia_version + 1
    # otherwise this is the first compendia that we are generating
    return 1
<|code_end|>
|
Quantpendia failed to upload to S3
### Context
We kicked off quantpendia jobs for all organisms but they weren't succeeding because they couldn't upload to S3.
### Problem or idea
This is probably just because the worker instances don't have access to the compendia S3 bucket. The smasher probably has those permissions, but it looks like the workers don't.
### Solution or next step
Give worker instances permissions to push to the compendia S3 bucket.
| workers/data_refinery_workers/processors/create_quantpendia.py
<|code_start|>import os
import logging
import shutil
import time
from django.utils import timezone
from typing import Dict, List, Tuple
import psutil
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputedFile,
Organism,
Pipeline,
Sample)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
logger.setLevel(logging.getLevelName('DEBUG'))
def create_quantpendia(job_id: int) -> Dict:
    """Run the quantpendia pipeline for the given ProcessorJob id.

    Steps: start the job, create the work dirs, download quant.sf files,
    build/upload the archive, remove the job dir, and end the job.
    """
    pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)
    job_context = utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
                                     [utils.start_job,
                                      make_dirs,
                                      download_files,
                                      create_result_objects,
                                      remove_job_dir,
                                      utils.end_job])
    return job_context
def download_files(job_context: Dict) -> Dict:
    """Sync each sample's quant.sf file into the dataset output folders.

    Logs progress (with CPU/RAM stats) per experiment and records
    timing and the number of downloaded files on the job context.
    """
    job_context['time_start'] = timezone.now()
    num_samples = 0
    for key, samples in job_context['samples'].items():
        outfile_dir = job_context['output_dir'] + key + '/'
        os.makedirs(outfile_dir, exist_ok=True)
        logger.debug("Downloading quant.sf files for quantpendia.",
                     accession_code=key,
                     job_id=job_context['job_id'],
                     **get_process_stats())
        # download quant.sf files directly into the dataset folder
        num_samples += smashing_utils.sync_quant_files(outfile_dir, samples)
    job_context['num_samples'] = num_samples
    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = "create_quantpendia.py"
    logger.debug("Finished downloading quant.sf files for quantpendia.",
                 job_id=job_context['job_id'],
                 total_downloaded_files=num_samples,
                 **get_process_stats())
    return job_context
def create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Zips the output directory into an organism-named archive, records
    it as a compendia ComputedFile, uploads it to S3, and marks the
    context successful.
    """
    result = ComputationalResult()
    # NOTE(review): 'formatted_command' is a plain string, so " ".join()
    # space-separates its characters — confirm this is intended.
    result.commands.append(" ".join(job_context['formatted_command']))
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_QUANTPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        # Delegate failure bookkeeping to the shared handler.
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()

    compendia_organism = _get_organisms(job_context['samples']).first()

    # Create the resulting archive
    smashing_utils.write_non_data_files(job_context)
    final_zip_base = job_context['job_dir'] + compendia_organism.name + "_rnaseq_compendia"
    shutil.copy("/home/user/README_QUANT.md", job_context["output_dir"] + "/README.md")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
    compendia_version = _get_next_compendia_version(compendia_organism)

    # Register the zip as a quant-sf-only compendia file for this organism.
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.is_compendia = True
    archive_computed_file.quant_sf_only = True
    archive_computed_file.compendia_organism = compendia_organism
    archive_computed_file.compendia_version = compendia_version
    archive_computed_file.save()

    logger.info("Quantpendia created!",
                archive_path=archive_path,
                organism_name=compendia_organism.name)

    # Upload the result to S3; the timestamp keeps keys unique per run.
    timestamp = str(int(time.time()))
    s3_key = compendia_organism.name + "_" + str(compendia_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)

    job_context['result'] = result
    job_context['computed_files'] = [archive_computed_file]
    job_context['success'] = True
    return job_context
def remove_job_dir(job_context: Dict):
    """Delete the job's working directory.

    Runs once the quantpendia has been zipped and uploaded, so the
    on-disk copy is no longer needed.
    """
    job_dir = job_context["job_dir"]
    shutil.rmtree(job_dir, ignore_errors=True)
    return job_context
def make_dirs(job_context: Dict):
    """Create the job's working directory and its output/ subdirectory.

    Paths are keyed by the dataset's primary key; ``exist_ok=True``
    makes this safe for retried jobs.
    """
    dataset_id = str(job_context["dataset"].pk)
    job_context["job_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    os.makedirs(job_context["job_dir"], exist_ok=True)
    job_context["output_dir"] = job_context["job_dir"] + "output/"
    os.makedirs(job_context["output_dir"], exist_ok=True)
    return job_context
def get_process_stats():
    """Return system-wide CPU percent and this process's RSS in GB."""
    bytes_per_gb = 1024 * 1024 * 1024
    rss_gb = psutil.Process(os.getpid()).memory_info().rss / bytes_per_gb
    return {'total_cpu': psutil.cpu_percent(), 'process_ram': rss_gb}
def _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:
    """Return the distinct Organisms referenced across all sample querysets."""
    organisms = set()
    for key, samples in aggregated_samples.items():
        organism_ids = samples.values_list('organism__id', flat=True).distinct()
        organisms.update(organism_ids)
    return Organism.objects.filter(id__in=list(organisms))
def _get_next_compendia_version(organism: Organism) -> int:
    """Return the next quantpendia version number for `organism`.

    Looks up the highest existing quant-sf-only compendium version
    (descending order, latest first) and adds one; starts at 1 when
    none exists yet.
    """
    last_compendia = ComputedFile.objects\
        .filter(is_compendia=True, quant_sf_only=True, compendia_organism=organism)\
        .order_by('-compendia_version').first()
    if last_compendia:
        return last_compendia.compendia_version + 1
    # otherwise this is the first compendia that we are generating
    return 1
<|code_end|>
| workers/data_refinery_workers/processors/create_quantpendia.py
<|code_start|>import os
import logging
import shutil
import time
from django.utils import timezone
from django.conf import settings
from typing import Dict, List, Tuple
import psutil
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputedFile,
Organism,
Pipeline,
Sample)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
logger.setLevel(logging.getLevelName('DEBUG'))
def create_quantpendia(job_id: int) -> None:
    """Run the full quantpendia pipeline for the given ProcessorJob id."""
    pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)
    steps = [
        utils.start_job,
        make_dirs,
        download_files,
        create_result_objects,
        remove_job_dir,
        utils.end_job,
    ]
    return utils.run_pipeline({"job_id": job_id, "pipeline": pipeline}, steps)
def download_files(job_context: Dict) -> Dict:
    """Sync every sample's quant.sf file into the dataset output folders.

    Logs progress (with CPU/RAM stats) per experiment and records
    timing and the number of downloaded files on the job context.
    """
    job_context['time_start'] = timezone.now()

    total_files = 0
    for accession, samples in job_context['samples'].items():
        sample_dir = job_context['output_dir'] + accession + '/'
        os.makedirs(sample_dir, exist_ok=True)
        logger.debug("Downloading quant.sf files for quantpendia.",
                     accession_code=accession,
                     job_id=job_context['job_id'],
                     **get_process_stats())
        # quant.sf files are synced straight into the dataset folder
        total_files += smashing_utils.sync_quant_files(sample_dir, samples)
    job_context['num_samples'] = total_files

    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = "create_quantpendia.py"

    logger.debug("Finished downloading quant.sf files for quantpendia.",
                 job_id=job_context['job_id'],
                 total_downloaded_files=total_files,
                 **get_process_stats())
    return job_context
def create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Zips the output directory into an organism-named archive, records
    it as a compendia ComputedFile, uploads it to S3, and marks the
    context successful.
    """
    result = ComputationalResult()
    # NOTE(review): 'formatted_command' is a plain string, so " ".join()
    # space-separates its characters — confirm this is intended.
    result.commands.append(" ".join(job_context['formatted_command']))
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_QUANTPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        # Delegate failure bookkeeping to the shared handler.
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()

    compendia_organism = _get_organisms(job_context['samples']).first()

    # Create the resulting archive
    smashing_utils.write_non_data_files(job_context)
    final_zip_base = job_context['job_dir'] + compendia_organism.name + "_rnaseq_compendia"
    shutil.copy("/home/user/README_QUANT.md", job_context["output_dir"] + "/README.md")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
    compendia_version = _get_next_compendia_version(compendia_organism)

    # Register the zip as a quant-sf-only compendia file for this organism.
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.is_compendia = True
    archive_computed_file.quant_sf_only = True
    archive_computed_file.compendia_organism = compendia_organism
    archive_computed_file.compendia_version = compendia_version
    archive_computed_file.save()

    logger.info("Quantpendia created!",
                archive_path=archive_path,
                organism_name=compendia_organism.name)

    # Upload the result to S3; the timestamp keeps keys unique per run.
    timestamp = str(int(time.time()))
    s3_key = compendia_organism.name + "_" + str(compendia_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)

    job_context['result'] = result
    job_context['success'] = True
    return job_context
def remove_job_dir(job_context: Dict):
    """Delete the job's working directory once the quantpendia has been
    zipped and uploaded.

    Skipped outside the cloud so local runs and tests keep their files.
    """
    # don't remove the files when running locally or for tests
    if settings.RUNNING_IN_CLOUD:
        shutil.rmtree(job_context["job_dir"], ignore_errors=True)
    return job_context
def make_dirs(job_context: Dict):
    """Create the job's working directory and its output/ subdirectory.

    Paths are keyed by the dataset's primary key; ``exist_ok=True``
    makes this safe for retried jobs.
    """
    dataset_id = str(job_context["dataset"].pk)
    job_context["job_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    os.makedirs(job_context["job_dir"], exist_ok=True)
    job_context["output_dir"] = job_context["job_dir"] + "output/"
    os.makedirs(job_context["output_dir"], exist_ok=True)
    return job_context
def get_process_stats():
    """Return system-wide CPU percent and this process's RSS in GB."""
    BYTES_IN_GB = 1024 * 1024 * 1024
    process = psutil.Process(os.getpid())
    ram_in_GB = process.memory_info().rss / BYTES_IN_GB
    return { 'total_cpu': psutil.cpu_percent(), 'process_ram': ram_in_GB }
def _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:
    """Return the distinct Organisms referenced across all sample querysets."""
    organism_ids = set()
    for samples in aggregated_samples.values():
        organism_ids.update(samples.values_list('organism__id', flat=True).distinct())
    return Organism.objects.filter(id__in=list(organism_ids))
def _get_next_compendia_version(organism: Organism) -> int:
    """Return the next quantpendia version for `organism`.

    One more than the highest existing quant-sf-only compendium version,
    or 1 when this is the organism's first quantpendia.
    """
    latest = ComputedFile.objects \
        .filter(is_compendia=True, quant_sf_only=True, compendia_organism=organism) \
        .order_by('-compendia_version').first()
    return latest.compendia_version + 1 if latest else 1
<|code_end|>
|
Extend processor exception classes to include additional arbitrary context
### Context
#1843 is a great improvement. However, right now those exceptions can only be used to log the job ID. If we wanted to log a sample accession or other contextual information, we couldn't do it as things stand.
### Problem or idea
We've created some new exception classes, and so we can add any properties/constructor parameters we want to them. I think it'd be helpful if we added a parameter that would store the additional context we want logged and then stored it on the ProcessorJobException object. Then where we handle those exceptions we can check that field and see if there's additional context we should be logging.
### Solution or next step
Implement what's above and use it in a few places. We can slowly transition our codebase to using this pattern more broadly over time once we have this implemented.
| workers/data_refinery_workers/processors/utils.py
<|code_start|>import os
import random
import shutil
import signal
import string
import subprocess
import sys
import yaml
from django.conf import settings
from django.utils import timezone
from typing import List, Dict, Callable
from data_refinery_common.job_lookup import ProcessorEnum, ProcessorPipeline, SMASHER_JOB_TYPES
from data_refinery_common.job_management import create_downloader_job
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
Dataset,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
OriginalFile,
OriginalFileSampleAssociation,
Pipeline,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
Sample,
)
from data_refinery_common.utils import (
get_env_variable,
get_env_variable_gracefully,
get_instance_id,
)
logger = get_and_configure_logger(__name__)
# Let this fail if SYSTEM_VERSION is unset.
SYSTEM_VERSION = get_env_variable("SYSTEM_VERSION")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
S3_QN_TARGET_BUCKET_NAME = get_env_variable("S3_QN_TARGET_BUCKET_NAME", "data-refinery")
DIRNAME = os.path.dirname(os.path.abspath(__file__))
CURRENT_JOB = None
def signal_handler(sig, frame):
    """Signal handler for both SIGTERM and SIGINT.

    Marks the in-flight job as failed with an interruption reason and
    exits cleanly. (`docker stop` sends SIGTERM; Nomad sends SIGINT.)
    """
    global CURRENT_JOB
    if CURRENT_JOB:
        CURRENT_JOB.success = False
        CURRENT_JOB.end_time = timezone.now()
        # num_retries is decremented here — presumably so the interruption
        # does not consume a retry; verify against the foreman retry logic.
        CURRENT_JOB.num_retries = CURRENT_JOB.num_retries - 1
        # Fixed typo in the stored failure reason ("Interruped").
        CURRENT_JOB.failure_reason = "Interrupted by SIGTERM/SIGINT: " + str(sig)
        CURRENT_JOB.save()
    sys.exit(0)
def prepare_original_files(job_context):
    """ Provision in the Job context for OriginalFile-driven processors.

    Loads the job's OriginalFiles and samples into the context. When any
    file is missing from disk, recreates downloader jobs for the missing
    files and deletes this processor job instead of failing it.

    NOTE(review): ProcessorJobError / ProcessorJobDeleteSelf are not among
    the imports visible at the top of this module — presumably defined
    later in the file; verify.
    """
    job = job_context["job"]
    original_files = job.original_files.all()
    if original_files.count() == 0:
        raise ProcessorJobError('No files were found for the job.', success=False)
    undownloaded_files = set()
    for original_file in original_files:
        if original_file.needs_downloading(job_context["job_id"]):
            if original_file.is_downloaded:
                # If it needs to be downloaded then it's not
                # downloaded and the is_downloaded field should stop
                # lying about that.
                original_file.is_downloaded = False
                original_file.save()
            undownloaded_files.add(original_file)
    if undownloaded_files:
        logger.info(
            ("One or more files found which were missing or not downloaded."
             " Creating downloader jobs for them and deleting this job."),
            processor_job=job.id,
            missing_files=list(undownloaded_files)
        )
        was_job_created = create_downloader_job(
            undownloaded_files,
            processor_job_id=job_context["job_id"],
            force=True
        )
        if not was_job_created:
            raise ProcessorJobError('Missing file for processor job but unable to recreate downloader jobs!',
                                    success=False)
        # If we can't process the data because it's not on the disk we
        # can't mark the job as a success since it obviously didn't
        # succeed. However if we mark it as a failure the job could be
        # retried triggering yet another DownloaderJob to be created
        # to re-download the data. Therefore the best option is to
        # delete this job.
        job.delete()
        raise ProcessorJobDeleteSelf('We can not process the data because it is not on the disk')
    job_context["original_files"] = original_files
    # Samples are resolved from the first original file's associations.
    first_original_file = original_files.first()
    samples = Sample.objects.filter(original_files=first_original_file)
    job_context['samples'] = samples
    job_context["computed_files"] = []
    return job_context
def prepare_dataset(job_context):
    """Populate the job context for Dataset-driven (smasher-type) processors.

    Looks up the single Dataset associated with the job, flags it as
    processing, and stores the dataset plus its aggregated samples and
    experiments in the context.

    Raises:
        ProcessorJobError: when the job has zero or more than one dataset.
    """
    job = job_context["job"]
    associated_datasets = job.datasets.all()

    # A processor job must be tied to exactly one dataset.
    dataset_count = associated_datasets.count()
    if dataset_count > 1:
        raise ProcessorJobError('More than one dataset for processor job!', success=False)
    if dataset_count == 0:
        raise ProcessorJobError('No datasets found for processor job!', success=False)

    dataset = associated_datasets.first()
    dataset.is_processing = True
    dataset.save()

    # Get the samples to smash
    job_context["dataset"] = dataset
    job_context["samples"] = dataset.get_aggregated_samples()
    job_context["experiments"] = dataset.get_experiments()

    # Just in case
    job_context["original_files"] = []
    job_context["computed_files"] = []
    return job_context
def start_job(job_context: Dict):
    """A processor function to start jobs.

    Records in the database that this job is being started, installs signal
    handlers, and provisions the context (original files or dataset) for the
    rest of the pipeline. Sets job_context['abort'] and returns early when
    the work has already been done.
    """
    job = job_context["job"]

    # If the sample already has a good computed file, this job is redundant.
    original_file = job.original_files.first()
    if not job.pipeline_applied == ProcessorPipeline.TXIMPORT.value and original_file\
       and not original_file.needs_processing(job_context["job_id"]):
        failure_reason = ("Sample has a good computed file, it must have been processed, "
                         "so it doesn't need to be downloaded! Aborting!")
        logger.error(failure_reason,
                     job_id=job.id,
                     original_file=original_file)
        job_context["original_files"] = []
        job_context["computed_files"] = []
        job_context['abort'] = True
        # Will be saved by end_job.
        job_context['job'].failure_reason = failure_reason
        return job_context

    # Set up the SIGTERM handler so we can appropriately handle being interrupted.
    # (`docker stop` uses SIGTERM, not SIGINT.)
    # (however, Nomad sends an SIGINT so catch both.)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # This job should not have been started, for some reason Nomad restarts some of our jobs
    # https://github.com/AlexsLemonade/refinebio/issues/1487
    if job.start_time is not None and settings.RUNNING_IN_CLOUD:
        # Let's just log the event and let the job run instead of failing
        # and also reset the endtime and failure reason, since those fields might have been set
        logger.warn('ProcessorJob was restarted by Nomad. We do not know why this happened',
                    processor_job=job.id,
                    success=job.success,
                    failure_reason=job.failure_reason,
                    start_time=job.start_time,
                    end_time=job.end_time)
        job.end_time = None
        job.failure_reason = None

    # Record which worker is running the job and when it started.
    job.worker_id = get_instance_id()
    job.worker_version = SYSTEM_VERSION
    job.start_time = timezone.now()
    job.save()

    # Remember the job so signal_handler can mark it interrupted on exit.
    global CURRENT_JOB
    CURRENT_JOB = job

    logger.debug("Starting processor Job.", processor_job=job.id, pipeline=job.pipeline_applied)

    # Janitor jobs don't operate on file objects.
    # Tximport jobs don't need to download the original file, they
    # just need it to know what experiment to process.
    if job.pipeline_applied not in [ProcessorPipeline.JANITOR.value, ProcessorPipeline.TXIMPORT.value]:
        # Some jobs take OriginalFiles, other take Datasets
        if ProcessorPipeline[job.pipeline_applied] not in SMASHER_JOB_TYPES:
            job_context = prepare_original_files(job_context)
            if not job_context.get("success", True):
                return job_context
        else:
            job_context = prepare_dataset(job_context)
            if not job_context.get("success", True):
                return job_context
    else:
        # Just in case
        job_context["original_files"] = []
        job_context["computed_files"] = []

    return job_context
def end_job(job_context: Dict, abort=False):
    """A processor function to end jobs.

    Uploads computed files to S3 (when successful), marks samples and their
    experiments as processed where appropriate, cleans up local files and
    the work directory, and records success/failure on the ProcessorJob.
    Returns the final job context so tests can inspect it.
    """
    job = job_context["job"]

    if "success" in job_context:
        success = job_context["success"]
    else:
        # Default to success when no processor recorded an explicit outcome.
        success = True

    # Upload first so if this fails we can set success = False and let
    # the rest of the function mark it as failed.
    if success:
        # QN reference files go to a special bucket so they can be
        # publicly available.
        if job_context["job"].pipeline_applied == "QN_REFERENCE":
            s3_bucket = S3_QN_TARGET_BUCKET_NAME
        else:
            s3_bucket = S3_BUCKET_NAME

        # S3-sync Computed Files
        for computed_file in job_context.get('computed_files', []):
            # Ensure even distribution across S3 servers
            nonce = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(24))
            result = computed_file.sync_to_s3(s3_bucket, nonce + "_" + computed_file.filename)
            if result:
                computed_file.delete_local_file()
            else:
                success = False
                job_context['success'] = False
                job.failure_reason = "Failed to upload computed file."
                break

    if not success:
        # A failed job's computed files are not useful; remove them locally
        # and from the database.
        for computed_file in job_context.get('computed_files', []):
            computed_file.delete_local_file()
            if computed_file.id:
                computed_file.delete()

    if not abort:
        if job_context.get("success", False) \
           and not (job_context["job"].pipeline_applied in [ProcessorPipeline.SMASHER.value,
                                                            ProcessorPipeline.QN_REFERENCE.value,
                                                            ProcessorPipeline.CREATE_COMPENDIA.value,
                                                            ProcessorPipeline.CREATE_QUANTPENDIA.value,
                                                            ProcessorPipeline.JANITOR.value]):
            # Salmon requires the final `tximport` step to be fully `is_processed`.
            mark_as_processed = True
            if (job_context["job"].pipeline_applied == "SALMON" and not job_context.get('tximported', False)):
                mark_as_processed = False

            if mark_as_processed:
                # This handles most of our cases
                unique_experiments = []
                for sample in job_context.get("samples", []):
                    sample.is_processed = True
                    sample.save()
                    if sample.experiments.all().count() > 0:
                        unique_experiments = list(set(unique_experiments + sample.experiments.all()[::1]))

                # Explicitly for the single-salmon scenario
                if 'sample' in job_context:
                    sample = job_context['sample']
                    sample.is_processed = True
                    sample.save()

                # Refresh sample counts on every touched experiment.
                for experiment in unique_experiments:
                    experiment.update_num_samples()

    # If we are aborting, it's because we want to do something
    # different, so leave the original files so that "something
    # different" can use them.
    if (success or job.no_retry) and not abort:
        # Cleanup Original Files
        if 'original_files' in job_context:
            for original_file in job_context['original_files']:
                original_file.delete_local_file()

    # If the pipeline includes any steps, save it.
    if 'pipeline' in job_context:
        pipeline = job_context['pipeline']
        if len(pipeline.steps):
            pipeline.save()

    # NOTE(review): compendia jobs deliberately skip work-dir cleanup here —
    # the reason isn't visible in this file; confirm before changing.
    if "work_dir" in job_context \
       and job_context["job"].pipeline_applied != ProcessorPipeline.CREATE_COMPENDIA.value \
       and settings.RUNNING_IN_CLOUD:
        shutil.rmtree(job_context["work_dir"], ignore_errors=True)

    job.success = success
    job.end_time = timezone.now()
    job.save()

    if success:
        logger.debug("Processor job completed successfully.",
                     processor_job=job.id,
                     pipeline_applied=job.pipeline_applied)
    else:
        if not job.failure_reason:
            logger.error("Processor job failed without having failure_reason set. FIX ME!!!!!!!!",
                         processor_job=job.id,
                         pipeline_applied=job.pipeline_applied,
                         no_retry=job.no_retry)
        else:
            logger.error("Processor job failed!",
                         processor_job=job.id,
                         pipeline_applied=job.pipeline_applied,
                         no_retry=job.no_retry,
                         failure_reason=job.failure_reason)

    # Return Final Job context so testers can check it
    return job_context
def run_pipeline(start_value: Dict, pipeline: List[Callable]):
    """Runs a pipeline of processor functions.

    start_value must contain a key 'job_id' which is a valid id for a
    ProcessorJob record.

    Each processor function must accept a dictionary and return a
    dictionary.

    Any processor function which returns a dictionary containing a key
    of 'success' with a value of False will cause the pipeline to
    terminate with a call to utils.end_job.

    The key 'job' is reserved for the ProcessorJob currently being
    run. It is required that the dictionary returned by each
    processor function preserve the mapping for 'job' that was passed
    into it.
    """
    job_id = start_value["job_id"]
    try:
        job = ProcessorJob.objects.get(id=job_id)
    except ProcessorJob.DoesNotExist:
        logger.error("Cannot find processor job record.", processor_job=job_id)
        return

    if len(pipeline) == 0:
        # Fixed: log kwarg was misspelled "procesor_job", breaking log search.
        logger.error("Empty pipeline specified.", processor_job=job_id)

    last_result = start_value
    last_result["job"] = job
    for processor in pipeline:
        try:
            last_result = processor(last_result)
        except ProcessorJobDeleteSelf as e:
            # The job deleted itself (e.g. missing files were re-queued for
            # download), so there is nothing left to finalize.
            logger.info('Processor Job deleted itself.', reason=e.reason, processor_job=job_id)
            break
        except ProcessorJobError as e:
            job.failure_reason = e.failure_reason
            if e.success is not None:
                # Fixed: this used to assign to a misspelled `job.succeess`
                # attribute, so the success flag was never actually stored.
                # Also propagate the outcome into the context so end_job
                # doesn't default to treating the job as a success.
                job.success = e.success
                last_result["success"] = e.success
            logger.exception(e.failure_reason, processor_job=job.id)
            return end_job(last_result)
        except Exception as e:
            failure_reason = ("Unhandled exception caught while running processor"
                              " function {} in pipeline: ").format(processor.__name__)
            logger.exception(failure_reason,
                             no_retry=job.no_retry,
                             processor_job=job_id)
            last_result["success"] = False
            last_result["job"].failure_reason = failure_reason + str(e)
            return end_job(last_result)

        if "success" in last_result and last_result["success"] is False:
            logger.error("Processor function %s failed. Terminating pipeline.",
                         processor.__name__,
                         processor_job=job_id,
                         failure_reason=last_result["job"].failure_reason)
            return end_job(last_result)

        if last_result.get("abort", False):
            return end_job(last_result, abort=True)

    return last_result
class ProcessorJobError(Exception):
    """General processor job error class.

    Carries the failure_reason to store on the job, an optional success
    flag, and optionally the original exception that triggered it.
    """

    def __init__(self, failure_reason, *, success=None, original_exception=None):
        super().__init__(failure_reason)
        self.failure_reason = failure_reason
        self.success = success
        self.original_exception = original_exception
class ProcessorJobDeleteSelf(Exception):
    """Raised when a processor job deletes itself.

    The pipeline runner catches this to stop processing without touching
    the (now-deleted) job record.
    """

    def __init__(self, reason):
        super().__init__(reason)
        self.reason = reason
def get_os_distro():
    """Return a string identifying the OS distribution.

    Since we are using Docker, this only considers Linux and reads the first
    line of /etc/issue. (Alternative files on Linux: /etc/os-release,
    /etc/lsb-release. "/etc/issue" doesn't exist on Mac OS X, where the
    "sw_vers" command would be needed; Python's "platform" module would be
    a more cross-platform solution.)
    """
    with open('/etc/issue') as distro_fh:
        first_line = distro_fh.readline()
    return first_line.strip('\l\n\\n ')
def get_os_pkgs(pkg_list):
    """Return a dict mapping each OS-level package name in pkg_list to its
    installed version.

    Assumes a Debian-based package manager (dpkg/apt). Raises Exception
    when dpkg-query does not know a requested package.
    """
    pkg_info = dict()
    for pkg in pkg_list:
        query = subprocess.run(['dpkg-query', '--show', pkg],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        if query.returncode:
            raise Exception("OS-level package %s not found: %s" %
                            (pkg, query.stderr.decode().strip())
                            )
        # dpkg-query --show prints "name<TAB>version"; keep the version field.
        pkg_info[pkg] = query.stdout.decode().strip().split('\t')[-1]
    return pkg_info
def get_cmd_lines(cmd_list):
    """Return a dict mapping each command string in cmd_list to the stripped
    text the command printed.

    Raises Exception when a command exits non-zero.
    """
    cmd_info = dict()
    for cmd in cmd_list:
        completed = subprocess.run(cmd.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        if completed.returncode:
            raise Exception("Failed to run command line '%s': %s" %
                            (cmd, completed.stderr.decode().strip())
                            )

        # Workaround for the "salmontools --version" command, whose output
        # goes to stderr instead of stdout. We keep stdout and stderr
        # separate (rather than using stderr=subprocess.STDOUT) so other
        # commands' diagnostics can't pollute their recorded output.
        if cmd.strip().split()[0] == "salmontools":
            cmd_info[cmd] = completed.stderr.decode().strip()
        else:
            cmd_info[cmd] = completed.stdout.decode().strip()
    return cmd_info
def get_pip_pkgs(pkg_list):
    """Return a dict mapping each pip-installed package in pkg_list to its
    version.

    Runs `pip freeze` once and parses the "name==version" lines, instead of
    shelling out to `pip show` per package, so only a single subprocess is
    launched. Raises Exception when pip fails or a requested package is not
    installed.
    """
    freeze = subprocess.run(['pip', 'freeze'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    if freeze.returncode:
        raise Exception("'pip freeze' failed: %s" % freeze.stderr.decode().strip())

    frozen_pkgs = dict()
    for item in freeze.stdout.decode().split():
        name, version = item.split("==")
        frozen_pkgs[name] = version

    pkg_info = dict()
    for pkg in pkg_list:
        try:
            pkg_info[pkg] = frozen_pkgs[pkg]
        except KeyError:
            raise Exception("Pip package not found: %s" % pkg)
    return pkg_info
def get_bioc_version():
    """Return the version string of the "Bioconductor" package in R.

    installed.packages() does NOT include an entry named "Bioconductor",
    so this asks R directly for the Bioconductor version associated with
    the running R release.
    """
    r_command = "tools:::.BioC_version_associated_with_R_version()"
    completed = subprocess.run(['Rscript', '-e', r_command],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    if completed.returncode:
        raise Exception('R command failed to retrieve Bioconductor version: %s' %
                        completed.stderr.decode().strip()
                        )

    # Take the last token of the output and drop the surrounding
    # (non-ascii) quote characters R prints around the version.
    version = completed.stdout.decode().strip().split()[-1][1:-1]
    if not version:
        raise Exception('Bioconductor not found')
    return version
def get_r_pkgs(pkg_list):
    """Return a dict mapping each R package name in pkg_list to its version.

    Queries R once for all user-installed packages, then looks up each
    requested name. "Bioconductor" and "Brainarray" get special handling
    (see below). Raises Exception when a requested package is not found.
    """
    # Use "Rscript -e <R_commands>" command to get all user-installed R packages.
    r_commands = "packages.df <- as.data.frame(installed.packages()[, c(1, 3:4)]); \
                  packages.df <- packages.df[is.na(packages.df$Priority), 1:2, drop=FALSE]; \
                  colnames(packages.df) <- NULL; \
                  print(packages.df, row.names=FALSE);"

    completed = subprocess.run(['Rscript', '-e', r_commands],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    if completed.returncode:
        raise Exception('R command failed to retrieves installed packages: %s' %
                        completed.stderr.decode().strip()
                        )

    r_pkgs = dict()
    for line in completed.stdout.decode().strip().split('\n'):
        name, version = line.strip().split()
        r_pkgs[name] = version

    # "Brainarray" is a collection that consists of 121 ".*ensgprobe" packages.
    # They share the same version number, so we use 'hgu133plus2hsensgprobe'
    # package to report this uniform version.
    ba_proxy_pkg = 'hgu133plus2hsensgprobe'

    pkg_info = dict()
    for pkg in pkg_list:
        if pkg == 'Bioconductor':
            pkg_info[pkg] = get_bioc_version()
            continue
        lookup_name = ba_proxy_pkg if pkg == "Brainarray" else pkg
        try:
            pkg_info[pkg] = r_pkgs[lookup_name]
        except KeyError:
            raise Exception("R package not found: %s" % pkg)
    return pkg_info
def get_checksums(filenames_list):
    """Return a dict mapping each filename in filenames_list to the md5
    checksum of the corresponding file under DIRNAME.
    """
    checksums = dict()
    for filename in filenames_list:
        abs_filepath = os.path.join(DIRNAME, filename)
        completed = subprocess.run(['md5sum', abs_filepath],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        if completed.returncode:
            raise Exception("md5sum command error:",
                            completed.stderr.decode().strip())
        # md5sum prints "<checksum>  <path>"; keep only the checksum.
        checksums[filename] = completed.stdout.decode().strip().split()[0]
    return checksums
def get_runtime_env(yml_filename):
    """Read the given YAML file and return a dict in which each key is a
    category name of the runtime environment and the corresponding value
    is an object with version information of the packages listed in that
    category.

    Raises Exception for unrecognized category names.
    """
    runtime_env = dict()
    with open(yml_filename) as yml_fh:
        # safe_load instead of load: plain yaml.load without an explicit
        # Loader is deprecated (PyYAML >= 5.1) and can construct arbitrary
        # Python objects; the package lists here only need plain data.
        pkgs = yaml.safe_load(yml_fh)
        for pkg_type, pkg_list in pkgs.items():
            if pkg_type == 'os_distribution':
                value = get_os_distro()
            elif pkg_type == 'os_pkg':
                value = get_os_pkgs(pkg_list)
            elif pkg_type == 'cmd_line':
                value = get_cmd_lines(pkg_list)
            elif pkg_type == 'python':
                value = get_pip_pkgs(pkg_list)
            elif pkg_type == 'R':
                value = get_r_pkgs(pkg_list)
            elif pkg_type == 'checksum':
                value = get_checksums(pkg_list)
            else:
                raise Exception("Unknown category in %s: %s" % (yml_filename, pkg_type))
            runtime_env[pkg_type] = value
    return runtime_env
def find_processor(enum_key):
    """Return the Processor record matching the current processor name,
    version, and runtime environment, creating one when none exists yet.
    """
    processor_spec = ProcessorEnum[enum_key].value
    name = processor_spec['name']
    docker_image = processor_spec['docker_img']

    # In current implementation, ALWAYS get the runtime environment.
    yml_path = os.path.join(DIRNAME, processor_spec['yml_file'])
    environment = get_runtime_env(yml_path)

    processor, _created = Processor.objects.get_or_create(name=name,
                                                          version=SYSTEM_VERSION,
                                                          docker_image=docker_image,
                                                          environment=environment)
    return processor
def handle_processor_exception(job_context, processor_key, ex):
    """Record a processor-setup failure on the job, flag the context as
    unsuccessful, and return the updated context.
    """
    failure_message = "Failed to set processor: %s" % ex
    logger.error(failure_message,
                 job_id=job_context["job"].id,
                 processor=processor_key)

    # Store the reason on both the job and the context so the pipeline
    # terminates and the failure is reported.
    job_context["job"].failure_reason = failure_message
    job_context["success"] = False
    return job_context
<|code_end|>
| workers/data_refinery_workers/processors/utils.py
<|code_start|>import os
import random
import shutil
import signal
import string
import subprocess
import sys
import yaml
from django.conf import settings
from django.utils import timezone
from typing import List, Dict, Callable
from data_refinery_common.job_lookup import ProcessorEnum, ProcessorPipeline, SMASHER_JOB_TYPES
from data_refinery_common.job_management import create_downloader_job
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
Dataset,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
OriginalFile,
OriginalFileSampleAssociation,
Pipeline,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
Sample,
)
from data_refinery_common.utils import (
get_env_variable,
get_env_variable_gracefully,
get_instance_id,
)
logger = get_and_configure_logger(__name__)
# Let this fail if SYSTEM_VERSION is unset.
SYSTEM_VERSION = get_env_variable("SYSTEM_VERSION")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
S3_QN_TARGET_BUCKET_NAME = get_env_variable("S3_QN_TARGET_BUCKET_NAME", "data-refinery")
DIRNAME = os.path.dirname(os.path.abspath(__file__))
CURRENT_JOB = None
def signal_handler(sig, frame):
    """Signal Handler, works for both SIGTERM and SIGINT"""
    global CURRENT_JOB
    if CURRENT_JOB:
        # Record the interruption on the in-flight job before exiting.
        # NOTE(review): num_retries is decremented here — presumably so the
        # requeue that follows doesn't count against the retry limit;
        # confirm against the retry logic before changing.
        CURRENT_JOB.success = False
        CURRENT_JOB.end_time = timezone.now()
        CURRENT_JOB.num_retries = CURRENT_JOB.num_retries - 1
        CURRENT_JOB.failure_reason = "Interruped by SIGTERM/SIGINT: " + str(sig)
        CURRENT_JOB.save()

    # Exit cleanly whether or not a job was running.
    sys.exit(0)
def prepare_original_files(job_context):
    """Provision the job context for OriginalFile-driven processors.

    Loads the job's OriginalFiles and attaches them (plus the samples of the
    first file, and an empty computed_files list) to the context.

    Raises:
        ProcessorJobError: when the job has no OriginalFiles or when missing
            files exist but new downloader jobs could not be created.
        ProcessorJobDeleteSelf: when files are missing from disk; the job is
            deleted and replacement downloader jobs are queued instead.
    """
    job = job_context["job"]
    original_files = job.original_files.all()

    if original_files.count() == 0:
        raise ProcessorJobError('No files were found for the job.', success=False)

    # Collect files that are not actually present/usable on disk.
    undownloaded_files = set()
    for original_file in original_files:
        if original_file.needs_downloading(job_context["job_id"]):
            if original_file.is_downloaded:
                # If it needs to be downloaded then it's not
                # downloaded and the is_downloaded field should stop
                # lying about that.
                original_file.is_downloaded = False
                original_file.save()

            undownloaded_files.add(original_file)

    if undownloaded_files:
        logger.info(
            ("One or more files found which were missing or not downloaded."
             " Creating downloader jobs for them and deleting this job."),
            processor_job=job.id,
            missing_files=list(undownloaded_files)
        )

        # Queue a downloader job to re-fetch the missing files; force=True
        # because a previous downloader job for them already ran.
        was_job_created = create_downloader_job(
            undownloaded_files,
            processor_job_id=job_context["job_id"],
            force=True
        )
        if not was_job_created:
            raise ProcessorJobError('Missing file for processor job but unable to recreate downloader jobs!',
                                    success=False)

        # If we can't process the data because it's not on the disk we
        # can't mark the job as a success since it obviously didn't
        # succeed. However if we mark it as a failure the job could be
        # retried triggering yet another DownloaderJob to be created
        # to re-download the data. Therefore the best option is to
        # delete this job.
        job.delete()
        raise ProcessorJobDeleteSelf('We can not process the data because it is not on the disk')

    job_context["original_files"] = original_files
    # NOTE(review): samples are looked up from the first original file only —
    # presumably all of a job's files share the same samples; confirm.
    first_original_file = original_files.first()
    samples = Sample.objects.filter(original_files=first_original_file)
    job_context['samples'] = samples
    job_context["computed_files"] = []

    return job_context
def prepare_dataset(job_context):
    """Populate the job context for Dataset-driven (smasher-type) processors.

    Looks up the single Dataset associated with the job, flags it as
    processing, and stores the dataset plus its aggregated samples and
    experiments in the context.

    Raises:
        ProcessorJobError: when the job has zero or more than one dataset.
    """
    job = job_context["job"]
    associated_datasets = job.datasets.all()

    # A processor job must be tied to exactly one dataset.
    dataset_count = associated_datasets.count()
    if dataset_count > 1:
        raise ProcessorJobError('More than one dataset for processor job!', success=False)
    if dataset_count == 0:
        raise ProcessorJobError('No datasets found for processor job!', success=False)

    dataset = associated_datasets.first()
    dataset.is_processing = True
    dataset.save()

    # Get the samples to smash
    job_context["dataset"] = dataset
    job_context["samples"] = dataset.get_aggregated_samples()
    job_context["experiments"] = dataset.get_experiments()

    # Just in case
    job_context["original_files"] = []
    job_context["computed_files"] = []
    return job_context
def start_job(job_context: Dict):
    """A processor function to start jobs.

    Records in the database that this job is being started, installs signal
    handlers, and provisions the context (original files or dataset) for the
    rest of the pipeline. Sets job_context['abort'] and returns early when
    the work has already been done.
    """
    job = job_context["job"]

    # If the sample already has a good computed file, this job is redundant.
    original_file = job.original_files.first()
    if not job.pipeline_applied == ProcessorPipeline.TXIMPORT.value and original_file\
       and not original_file.needs_processing(job_context["job_id"]):
        failure_reason = ("Sample has a good computed file, it must have been processed, "
                         "so it doesn't need to be downloaded! Aborting!")
        logger.error(failure_reason,
                     job_id=job.id,
                     original_file=original_file)
        job_context["original_files"] = []
        job_context["computed_files"] = []
        job_context['abort'] = True
        # Will be saved by end_job.
        job_context['job'].failure_reason = failure_reason
        return job_context

    # Set up the SIGTERM handler so we can appropriately handle being interrupted.
    # (`docker stop` uses SIGTERM, not SIGINT.)
    # (however, Nomad sends an SIGINT so catch both.)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # This job should not have been started, for some reason Nomad restarts some of our jobs
    # https://github.com/AlexsLemonade/refinebio/issues/1487
    if job.start_time is not None and settings.RUNNING_IN_CLOUD:
        # Let's just log the event and let the job run instead of failing
        # and also reset the endtime and failure reason, since those fields might have been set
        logger.warn('ProcessorJob was restarted by Nomad. We do not know why this happened',
                    processor_job=job.id,
                    success=job.success,
                    failure_reason=job.failure_reason,
                    start_time=job.start_time,
                    end_time=job.end_time)
        job.end_time = None
        job.failure_reason = None

    # Record which worker is running the job and when it started.
    job.worker_id = get_instance_id()
    job.worker_version = SYSTEM_VERSION
    job.start_time = timezone.now()
    job.save()

    # Remember the job so signal_handler can mark it interrupted on exit.
    global CURRENT_JOB
    CURRENT_JOB = job

    logger.debug("Starting processor Job.", processor_job=job.id, pipeline=job.pipeline_applied)

    # Janitor jobs don't operate on file objects.
    # Tximport jobs don't need to download the original file, they
    # just need it to know what experiment to process.
    if job.pipeline_applied not in [ProcessorPipeline.JANITOR.value, ProcessorPipeline.TXIMPORT.value]:
        # Some jobs take OriginalFiles, other take Datasets
        if ProcessorPipeline[job.pipeline_applied] not in SMASHER_JOB_TYPES:
            job_context = prepare_original_files(job_context)
            if not job_context.get("success", True):
                return job_context
        else:
            job_context = prepare_dataset(job_context)
            if not job_context.get("success", True):
                return job_context
    else:
        # Just in case
        job_context["original_files"] = []
        job_context["computed_files"] = []

    return job_context
def end_job(job_context: Dict, abort=False):
    """A processor function to end jobs.

    Uploads computed files to S3 (when successful), marks samples and their
    experiments as processed where appropriate, cleans up local files and
    the work directory, and records success/failure on the ProcessorJob.
    Returns the final job context so tests can inspect it.
    """
    job = job_context["job"]

    if "success" in job_context:
        success = job_context["success"]
    else:
        # Default to success when no processor recorded an explicit outcome.
        success = True

    # Upload first so if this fails we can set success = False and let
    # the rest of the function mark it as failed.
    if success:
        # QN reference files go to a special bucket so they can be
        # publicly available.
        if job_context["job"].pipeline_applied == "QN_REFERENCE":
            s3_bucket = S3_QN_TARGET_BUCKET_NAME
        else:
            s3_bucket = S3_BUCKET_NAME

        # S3-sync Computed Files
        for computed_file in job_context.get('computed_files', []):
            # Ensure even distribution across S3 servers
            nonce = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(24))
            result = computed_file.sync_to_s3(s3_bucket, nonce + "_" + computed_file.filename)
            if result:
                computed_file.delete_local_file()
            else:
                success = False
                job_context['success'] = False
                job.failure_reason = "Failed to upload computed file."
                break

    if not success:
        # A failed job's computed files are not useful; remove them locally
        # and from the database.
        for computed_file in job_context.get('computed_files', []):
            computed_file.delete_local_file()
            if computed_file.id:
                computed_file.delete()

    if not abort:
        if job_context.get("success", False) \
           and not (job_context["job"].pipeline_applied in [ProcessorPipeline.SMASHER.value,
                                                            ProcessorPipeline.QN_REFERENCE.value,
                                                            ProcessorPipeline.CREATE_COMPENDIA.value,
                                                            ProcessorPipeline.CREATE_QUANTPENDIA.value,
                                                            ProcessorPipeline.JANITOR.value]):
            # Salmon requires the final `tximport` step to be fully `is_processed`.
            mark_as_processed = True
            if (job_context["job"].pipeline_applied == "SALMON" and not job_context.get('tximported', False)):
                mark_as_processed = False

            if mark_as_processed:
                # This handles most of our cases
                unique_experiments = []
                for sample in job_context.get("samples", []):
                    sample.is_processed = True
                    sample.save()
                    if sample.experiments.all().count() > 0:
                        unique_experiments = list(set(unique_experiments + sample.experiments.all()[::1]))

                # Explicitly for the single-salmon scenario
                if 'sample' in job_context:
                    sample = job_context['sample']
                    sample.is_processed = True
                    sample.save()

                # Refresh sample counts on every touched experiment.
                for experiment in unique_experiments:
                    experiment.update_num_samples()

    # If we are aborting, it's because we want to do something
    # different, so leave the original files so that "something
    # different" can use them.
    if (success or job.no_retry) and not abort:
        # Cleanup Original Files
        if 'original_files' in job_context:
            for original_file in job_context['original_files']:
                original_file.delete_local_file()

    # If the pipeline includes any steps, save it.
    if 'pipeline' in job_context:
        pipeline = job_context['pipeline']
        if len(pipeline.steps):
            pipeline.save()

    # NOTE(review): compendia jobs deliberately skip work-dir cleanup here —
    # the reason isn't visible in this file; confirm before changing.
    if "work_dir" in job_context \
       and job_context["job"].pipeline_applied != ProcessorPipeline.CREATE_COMPENDIA.value \
       and settings.RUNNING_IN_CLOUD:
        shutil.rmtree(job_context["work_dir"], ignore_errors=True)

    job.success = success
    job.end_time = timezone.now()
    job.save()

    if success:
        logger.debug("Processor job completed successfully.",
                     processor_job=job.id,
                     pipeline_applied=job.pipeline_applied)
    else:
        if not job.failure_reason:
            logger.error("Processor job failed without having failure_reason set. FIX ME!!!!!!!!",
                         processor_job=job.id,
                         pipeline_applied=job.pipeline_applied,
                         no_retry=job.no_retry)
        else:
            logger.error("Processor job failed!",
                         processor_job=job.id,
                         pipeline_applied=job.pipeline_applied,
                         no_retry=job.no_retry,
                         failure_reason=job.failure_reason)

    # Return Final Job context so testers can check it
    return job_context
def run_pipeline(start_value: Dict, pipeline: List[Callable]):
    """Runs a pipeline of processor functions.

    start_value must contain a key 'job_id' which is a valid id for a
    ProcessorJob record.

    Each processor function must accept a dictionary and return a
    dictionary.

    Any processor function which returns a dictionary containing a key
    of 'success' with a value of False will cause the pipeline to
    terminate with a call to utils.end_job.

    The key 'job' is reserved for the ProcessorJob currently being
    run. It is required that the dictionary returned by each
    processor function preserve the mapping for 'job' that was passed
    into it.
    """
    job_id = start_value["job_id"]
    try:
        job = ProcessorJob.objects.get(id=job_id)
    except ProcessorJob.DoesNotExist:
        logger.error("Cannot find processor job record.", processor_job=job_id)
        return

    if len(pipeline) == 0:
        # Fixed: log kwarg was misspelled "procesor_job", breaking log search.
        logger.error("Empty pipeline specified.", processor_job=job_id)

    last_result = start_value
    last_result["job"] = job
    for processor in pipeline:
        try:
            last_result = processor(last_result)
        except ProcessorJobDeleteSelf as e:
            # The job deleted itself (e.g. missing files were re-queued for
            # download), so there is nothing left to finalize.
            logger.info('Processor Job deleted itself.', reason=e.reason, processor_job=job_id)
            break
        except ProcessorJobError as e:
            e.update_job(job)
            logger.exception(e.failure_reason, processor_job=job.id, **e.context)
            if e.success is False:
                # end_job will use this and set the value
                last_result['success'] = False
            return end_job(last_result)
        except Exception as e:
            failure_reason = ("Unhandled exception caught while running processor"
                              " function {} in pipeline: ").format(processor.__name__)
            logger.exception(failure_reason,
                             no_retry=job.no_retry,
                             processor_job=job_id)
            last_result["success"] = False
            last_result["job"].failure_reason = failure_reason + str(e)
            return end_job(last_result)

        if "success" in last_result and last_result["success"] is False:
            logger.error("Processor function %s failed. Terminating pipeline.",
                         processor.__name__,
                         processor_job=job_id,
                         failure_reason=last_result["job"].failure_reason)
            return end_job(last_result)

        if last_result.get("abort", False):
            return end_job(last_result, abort=True)

    return last_result
class ProcessorJobError(Exception):
    """General processor job error class.

    Carries the failure_reason (and optional success/no_retry flags) to be
    written onto the ProcessorJob, plus arbitrary keyword context that is
    included when the error is logged.
    """

    def __init__(self, failure_reason, *, success=None, no_retry=None, **context):
        super().__init__(failure_reason)
        self.failure_reason = failure_reason
        self.success = success
        self.no_retry = no_retry
        # additional context to be included when logging
        self.context = context

    def update_job(self, job):
        """Copy this error's fields onto the given job and save it."""
        job.failure_reason = self.failure_reason

        if self.success is not None:
            job.success = self.success
        if self.no_retry is not None:
            job.no_retry = self.no_retry

        job.save()
class ProcessorJobDeleteSelf(Exception):
    """Raised when a processor job deletes itself.

    The pipeline runner catches this to stop processing without touching
    the (now-deleted) job record.
    """

    def __init__(self, reason):
        super().__init__(reason)
        self.reason = reason
def get_os_distro():
    """Return a string identifying the OS distribution.

    Since we are using Docker, this only considers Linux and reads the first
    line of /etc/issue. (Alternative files on Linux: /etc/os-release,
    /etc/lsb-release. "/etc/issue" doesn't exist on Mac OS X, where the
    "sw_vers" command would be needed; Python's "platform" module would be
    a more cross-platform solution.)
    """
    with open('/etc/issue') as distro_fh:
        first_line = distro_fh.readline()
    return first_line.strip('\l\n\\n ')
def get_os_pkgs(pkg_list):
    """Return a dict mapping each OS-level package name in pkg_list to its
    installed version.

    Assumes a Debian-based package manager (dpkg/apt). Raises Exception
    when dpkg-query does not know a requested package.
    """
    pkg_info = dict()
    for pkg in pkg_list:
        query = subprocess.run(['dpkg-query', '--show', pkg],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        if query.returncode:
            raise Exception("OS-level package %s not found: %s" %
                            (pkg, query.stderr.decode().strip())
                            )
        # dpkg-query --show prints "name<TAB>version"; keep the version field.
        pkg_info[pkg] = query.stdout.decode().strip().split('\t')[-1]
    return pkg_info
def get_cmd_lines(cmd_list):
    """Return a dictionary mapping each command string in *cmd_list* to its
    stripped output."""
    cmd_info = dict()
    for cmd in cmd_list:
        result = subprocess.run(cmd.split(),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        if result.returncode:
            raise Exception("Failed to run command line '%s': %s" %
                            (cmd, result.stderr.decode().strip())
            )
        # Workaround for "salmontools --version", which sends its output to
        # stderr instead of stdout. We keep stdout and stderr separate (rather
        # than merging them with stderr=subprocess.STDOUT) and special-case it.
        base_cmd = cmd.strip().split()[0]
        if base_cmd == "salmontools":
            raw_output = result.stderr
        else:
            raw_output = result.stdout
        cmd_info[cmd] = raw_output.decode().strip()
    return cmd_info
def get_pip_pkgs(pkg_list):
    """Return a dictionary mapping each pip-installed package name in
    *pkg_list* to its version.

    Instead of running ``pip show pkg`` per package, the output of
    ``pip freeze`` is captured once and every requested package is looked up
    in it, so the subprocess is launched only a single time.

    Raises:
        Exception: if ``pip freeze`` fails or a requested package is not
            installed.
    """
    process_done = subprocess.run(['pip', 'freeze'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    if process_done.returncode:
        raise Exception("'pip freeze' failed: %s" % process_done.stderr.decode().strip())

    frozen_pkgs = dict()
    for item in process_done.stdout.decode().split():
        # Editable/VCS requirements ("-e git+...") and direct references
        # ("pkg @ file://...") don't use the "name==version" form; skip those
        # tokens instead of crashing on a failed two-way unpack.
        name, sep, version = item.partition("==")
        if sep:
            frozen_pkgs[name] = version

    pkg_info = dict()
    for pkg in pkg_list:
        try:
            pkg_info[pkg] = frozen_pkgs[pkg]
        except KeyError:
            raise Exception("Pip package not found: %s" % pkg)
    return pkg_info
def get_bioc_version():
    """Return the version string of the "Bioconductor" package in R.

    The data frame returned by installed.packages() does NOT include a
    package named "Bioconductor", so a dedicated R command is launched to
    look the version up.
    """
    r_command = "tools:::.BioC_version_associated_with_R_version()"
    completed = subprocess.run(['Rscript', '-e', r_command],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    if completed.returncode:
        raise Exception('R command failed to retrieve Bioconductor version: %s' %
                        completed.stderr.decode().strip()
        )
    # Take the last whitespace-separated token, then drop the leading and
    # trailing (non-ASCII quote) characters that R wraps around it.
    raw_version = completed.stdout.decode().strip().split()[-1]
    version = raw_version[1:-1]
    if not version:
        raise Exception('Bioconductor not found')
    return version
def get_r_pkgs(pkg_list):
    """Return a dictionary mapping each R package name in *pkg_list* to its
    installed version."""
    # Ask R for all user-installed packages (name + version, no header row).
    r_commands = "packages.df <- as.data.frame(installed.packages()[, c(1, 3:4)]); \
packages.df <- packages.df[is.na(packages.df$Priority), 1:2, drop=FALSE]; \
colnames(packages.df) <- NULL; \
print(packages.df, row.names=FALSE);"

    completed = subprocess.run(['Rscript', '-e', r_commands],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    if completed.returncode:
        raise Exception('R command failed to retrieves installed packages: %s' %
                        completed.stderr.decode().strip()
        )

    r_pkgs = dict()
    for line in completed.stdout.decode().strip().split('\n'):
        name, version = line.strip().split()
        r_pkgs[name] = version

    # "Brainarray" is a collection that consists of 121 ".*ensgprobe"
    # packages. They all share the same version number, so the
    # 'hgu133plus2hsensgprobe' package is used to report that uniform version.
    ba_proxy_pkg = 'hgu133plus2hsensgprobe'

    pkg_info = dict()
    for pkg in pkg_list:
        if pkg == 'Bioconductor':
            # Bioconductor isn't in installed.packages(); query it separately.
            pkg_info[pkg] = get_bioc_version()
            continue
        lookup_name = ba_proxy_pkg if pkg == "Brainarray" else pkg
        try:
            pkg_info[pkg] = r_pkgs[lookup_name]
        except KeyError:
            raise Exception("R package not found: %s" % pkg)
    return pkg_info
def get_checksums(filenames_list):
    """Return a dictionary mapping each file name in *filenames_list* to its
    md5 checksum.

    Paths are resolved relative to the module-level DIRNAME.
    """
    checksums = dict()
    for filename in filenames_list:
        abs_filepath = os.path.join(DIRNAME, filename)
        completed = subprocess.run(['md5sum', abs_filepath],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        if completed.returncode:
            raise Exception("md5sum command error:",
                            completed.stderr.decode().strip())
        # md5sum prints "<checksum>  <path>"; keep only the checksum token.
        checksums[filename] = completed.stdout.decode().strip().split()[0]
    return checksums
def get_runtime_env(yml_filename):
    """Read the YAML file *yml_filename* and return a dictionary in which
    each key is a runtime-environment category name and the corresponding
    value holds the version information of the packages listed under it.

    Recognized categories: 'os_distribution', 'os_pkg', 'cmd_line', 'python',
    'R' and 'checksum'; each one is delegated to the matching helper.

    Raises:
        Exception: if the YAML file contains an unknown category.
    """
    # Dispatch table from category name to the helper that collects its
    # version info ('os_distribution' is handled separately: it takes no list).
    handlers = {
        'os_pkg': get_os_pkgs,
        'cmd_line': get_cmd_lines,
        'python': get_pip_pkgs,
        'R': get_r_pkgs,
        'checksum': get_checksums,
    }

    with open(yml_filename) as yml_fh:
        # safe_load avoids arbitrary Python object construction; a bare
        # yaml.load without an explicit Loader is unsafe and deprecated in
        # PyYAML >= 5.1, and this file only contains plain package lists.
        pkgs = yaml.safe_load(yml_fh)

    runtime_env = dict()
    for pkg_type, pkg_list in pkgs.items():
        if pkg_type == 'os_distribution':
            runtime_env[pkg_type] = get_os_distro()
        elif pkg_type in handlers:
            runtime_env[pkg_type] = handlers[pkg_type](pkg_list)
        else:
            raise Exception("Unknown category in %s: %s" % (yml_filename, pkg_type))
    return runtime_env
def find_processor(enum_key):
    """Return either a newly created Processor record, or the one in the
    database that matches the current processor name, version and environment.
    """
    processor_config = ProcessorEnum[enum_key].value
    # In the current implementation, ALWAYS collect the runtime environment.
    yml_path = os.path.join(DIRNAME, processor_config['yml_file'])
    environment = get_runtime_env(yml_path)

    processor, _created = Processor.objects.get_or_create(
        name=processor_config['name'],
        version=SYSTEM_VERSION,
        docker_image=processor_config['docker_img'],
        environment=environment)
    return processor
def handle_processor_exception(job_context, processor_key, ex):
    """Log *ex* as a processor-lookup failure, record it on the job in
    *job_context*, flag the context as unsuccessful and return it."""
    failure_message = "Failed to set processor: %s" % ex
    job = job_context["job"]
    logger.error(failure_message, job_id=job.id, processor=processor_key)
    job.failure_reason = failure_message
    job_context["success"] = False
    return job_context
<|code_end|>
|
Quantpendia command timed out for MUS_MUSCULUS
Error:
```
ubuntu@ip-10-0-127-71:~$ docker logs inspiring_goldberg
2019-11-11 22:18:45,252 i-04bb28b499152d24f [volume: -1] data_refinery_foreman.foreman.management.commands.create_quantpendia INFO [organism: HOMO_SAPIENS] [job_id: 29380081]: Sending compendia job for Organism
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 85, in _execute
return self.cursor.execute(sql, params)
psycopg2.extensions.QueryCanceledError: canceling statement due to statement timeout
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "manage.py", line 23, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/home/user/data_refinery_foreman/foreman/management/commands/create_quantpendia.py", line 83, in handle
job = create_job_for_organism(organism)
File "/home/user/data_refinery_foreman/foreman/management/commands/create_quantpendia.py", line 34, in create_job_for_organism
data[experiment.accession_code] = list(samples_with_quantsf)
File "/usr/local/lib/python3.5/dist-packages/django/db/models/query.py", line 268, in __iter__
self._fetch_all()
File "/usr/local/lib/python3.5/dist-packages/django/db/models/query.py", line 1186, in _fetch_all
self._result_cache = list(self._iterable_class(self))
File "/usr/local/lib/python3.5/dist-packages/django/db/models/query.py", line 176, in __iter__
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
File "/usr/local/lib/python3.5/dist-packages/django/db/models/sql/compiler.py", line 1017, in results_iter
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
File "/usr/local/lib/python3.5/dist-packages/django/db/models/sql/compiler.py", line 1065, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.5/dist-packages/raven/contrib/django/client.py", line 127, in execute
return real_execute(self, sql, params)
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 68, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 77, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 85, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.5/dist-packages/django/db/utils.py", line 89, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 85, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: canceling statement due to statement timeout
```
Worked for human.
| foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py
<|code_start|>import sys
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
from data_refinery_common.utils import queryset_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism) -> ProcessorJob:
    """Returns a quantpendia job for the provided organism.

    Builds a {experiment_accession: [sample_accessions]} mapping of the
    organism's samples that have quant.sf files, wraps it in a Dataset, and
    associates the Dataset with a new CREATE_QUANTPENDIA processor job.
    """
    data = {}
    experiments = Experiment.objects.filter(
        organisms=organism,
        samples__results__computedfile__filename='quant.sf'
    )\
        .distinct()

    for experiment in queryset_iterator(experiments):
        # only include the samples from the target organism that have quant.sf files
        samples_with_quantsf = experiment.samples\
            .filter(
                organism=organism,
                results__computedfile__filename='quant.sf'
            )\
            .values_list('accession_code', flat=True)\
            .distinct()
        data[experiment.accession_code] = list(samples_with_quantsf)

    job = ProcessorJob()
    job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value
    job.save()

    # Dataset options for a quantpendia: quant.sf files only, grouped by
    # experiment, with no scaling/quantile-normalization/SVD applied.
    dset = Dataset()
    dset.data = data
    dset.scale_by = 'NONE'
    dset.aggregate_by = 'EXPERIMENT'
    dset.quantile_normalize = False
    dset.quant_sf_only = True
    dset.svd_algorithm = 'NONE'
    dset.save()

    pjda = ProcessorJobDatasetAssociation()
    pjda.processor_job = job
    pjda.dataset = dset
    pjda.save()

    return job
class Command(BaseCommand):
    """Management command that queues a quantpendia processor job for each
    eligible organism (optionally restricted via --organisms)."""

    def add_arguments(self, parser):
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))

    def handle(self, *args, **options):
        """Create a quantpendia for one or more organisms."""
        # Only organisms that have a QN target are considered.
        all_organisms = Organism.objects.all().filter(qn_target__isnull=False)
        if options["organisms"] is not None:
            # Normalize e.g. "homo sapiens,mus musculus" -> ["HOMO_SAPIENS", "MUS_MUSCULUS"].
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = all_organisms.filter(name__in=organisms)

        logger.debug('Generating quantpendia for organisms', organisms=all_organisms)

        for organism in all_organisms:
            # only generate the quantpendia for organisms that have some samples
            # with quant.sf files.
            has_quantsf_files = organism.sample_set\
                .filter(results__computedfile__filename='quant.sf')\
                .exists()
            if not has_quantsf_files:
                continue

            job = create_job_for_organism(organism)
            logger.info("Sending compendia job for Organism", job_id=str(job.pk), organism=str(organism))
            send_job(ProcessorPipeline.CREATE_QUANTPENDIA, job)

        sys.exit(0)
<|code_end|>
| foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py
<|code_start|>import sys
import time
from django.core.management.base import BaseCommand
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
from data_refinery_common.utils import queryset_page_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism) -> ProcessorJob:
    """Returns a quantpendia job for the provided organism.

    The job is created first, then a Dataset built by build_dataset() is
    attached to it through a ProcessorJobDatasetAssociation.
    """
    job = ProcessorJob()
    job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value
    job.save()

    # Dataset options for a quantpendia: quant.sf files only, grouped by
    # experiment, with no scaling/quantile-normalization/SVD applied.
    dset = Dataset()
    dset.data = build_dataset(organism)
    dset.scale_by = 'NONE'
    dset.aggregate_by = 'EXPERIMENT'
    dset.quantile_normalize = False
    dset.quant_sf_only = True
    dset.svd_algorithm = 'NONE'
    dset.save()

    pjda = ProcessorJobDatasetAssociation()
    pjda.processor_job = job
    pjda.dataset = dset
    pjda.save()

    return job
def build_dataset(organism: Organism) -> dict:
    """Build the {experiment_accession: [sample_accessions]} mapping for
    *organism*, including only RNA-seq samples that have quant.sf files.

    Experiments are walked page by page and sample lookups are split into
    two cheaper queries to avoid database statement timeouts.
    """
    data = {}
    experiments = Experiment.objects.filter(
        organisms=organism,
        technology='RNA-SEQ',
    )\
        .distinct()

    for experiment_page in queryset_page_iterator(experiments):
        for experiment in experiment_page:
            # only include the samples from the target organism that have quant.sf files
            experiment_samples = experiment.samples\
                .filter(organism=organism, technology='RNA-SEQ')
            # split the query into two so to avoid timeouts.
            # assume processed rna-seq samples have a quant.sf file
            processed_samples_with_quantsf = experiment_samples\
                .filter(is_processed=True)\
                .values_list('accession_code', flat=True)
            # and only check for quant file for unprocessed samples
            unprocessed_samples_with_quantsf = experiment_samples\
                .filter(
                    is_processed=False,
                    results__computedfile__filename='quant.sf'
                )\
                .values_list('accession_code', flat=True)\
                .distinct()

            sample_accession_codes = list(processed_samples_with_quantsf) \
                + list(unprocessed_samples_with_quantsf)

            # Experiments without any qualifying samples are left out entirely.
            if (sample_accession_codes):
                data[experiment.accession_code] = sample_accession_codes

        # NOTE(review): presumably throttles load on the database between
        # pages of experiments -- confirm intent and loop level.
        time.sleep(5)

    return data
class Command(BaseCommand):
    """Management command that queues a quantpendia processor job for each
    eligible organism (optionally restricted via --organisms)."""

    def add_arguments(self, parser):
        parser.add_argument(
            "--organisms",
            type=str,
            help=("Comma separated list of organism names."))

    def handle(self, *args, **options):
        """Create a quantpendia for one or more organisms."""
        # Only organisms that have a QN target are considered.
        all_organisms = Organism.objects.all().filter(qn_target__isnull=False)
        if options["organisms"] is not None:
            # Normalize e.g. "homo sapiens,mus musculus" -> ["HOMO_SAPIENS", "MUS_MUSCULUS"].
            organisms = options["organisms"].upper().replace(" ", "_").split(",")
            all_organisms = all_organisms.filter(name__in=organisms)

        logger.debug('Generating quantpendia for organisms', organisms=all_organisms)

        for organism in all_organisms:
            # only generate the quantpendia for organisms that have some samples
            # with quant.sf files.
            has_quantsf_files = organism.sample_set\
                .filter(technology='RNA-SEQ', results__computedfile__filename='quant.sf')\
                .exists()
            if not has_quantsf_files:
                continue

            job = create_job_for_organism(organism)
            logger.info("Sending compendia job for Organism", job_id=str(job.pk), organism=str(organism))
            send_job(ProcessorPipeline.CREATE_QUANTPENDIA, job)

        sys.exit(0)
<|code_end|>
|
Add the biggest `failure_reason` to the dashboard?
### Context
I have been debugging some sample processing errors.
### Problem or idea
I think it would be useful to have a list of the top failure reasons and the number of samples that they affect. I'm not sure if the dashboard is a good place for this.
```
ccc | job_failure_reason
--------+-------------------------------------------------------------------------------------
646262 |
47680 | Salmon timed out because it failed to complete within 3 hours.
40654 | Encountered error in R code while running illumina.R pipeline during processing of
29566 | Unhandled exception caught while running processor function _prepare_files in pipel
26986 | Shell call to salmon failed because: ### salmon (mapping-based) v0.13.1 +
| ### [ progr
10347 | Missing file for processor job but unable to recreate downloader jobs!
8912 | Encountered error in R code while running AFFY_TO_PCL pipeline during processing of
3682 | The tx index for this organism is broken right now.
3010 | list index out of range
2629 | ProcessorJob has already completed with a fail - why are we here again? Bad Nomad!
2556 | Unhandled exception caught while running processor function start_job in pipeline:
2440 | Missing transcriptome index. (TRANSCRIPTOME_LONG)
1919 | Unhandled exception caught while running processor function _convert_genes in pipel
1886 | Missing transcriptome index. (TRANSCRIPTOME_SHORT)
1699 | Failed to download or extract transcriptome index for organism ARABIDOPSIS_THALIANA
1424 | Told to convert AFFY genes without an internal_accession - how did this happen?
1200 | Unhandled exception caught while running processor function _detect_platform in pip
1138 | Unhandled exception caught while running processor function _run_scan_upc in pipeli
868 | Encountered error in R code while running gene_convert_illumina.R pipeline during p
791 | Status code -9 from gene_convert.R: b"\nAttaching package: 'dplyr'\n\nThe following
(20 rows)
```
That's the top 20 failure reasons with a total of `835,649` potential samples. It's also very strange that there are so many samples with no `failure_reason` on their last processor job.
<details><summary>query</summary>
```sql
select count(*) as ccc, (
select left(processor_jobs.failure_reason, 83)
from original_file_sample_associations, processorjob_originalfile_associations, processor_jobs
where samples.id=original_file_sample_associations.sample_id
and original_file_sample_associations.original_file_id=processorjob_originalfile_associations.original_file_id
and processorjob_originalfile_associations.processor_job_id=processor_jobs.id
order by processor_jobs.start_time desc nulls last
limit 1
) as job_failure_reason
from samples
where samples.is_processed='f'
group by job_failure_reason
order by ccc desc
limit 20;
```
</details>
I have been working on both:
```
8912 | Encountered error in R code while running AFFY_TO_PCL pipeline during processing of
2629 | ProcessorJob has already completed with a fail - why are we here again? Bad Nomad!
```
Maybe we can re-queue some of the samples with these errors? With https://github.com/AlexsLemonade/refinebio/pull/1541
### Solution or next step
Should we add this to the dashboard?
| api/data_refinery_api/urls.py
<|code_start|>from rest_framework.routers import DefaultRouter
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from django.views.generic import RedirectView
from .views import (
ExperimentDocumentView,
ExperimentList,
ExperimentDetail,
SampleList,
SampleDetail,
OrganismList,
PlatformList,
InstitutionList,
SurveyJobList,
DownloaderJobList,
ProcessorJobList,
ComputationalResultsList,
ProcessorList,
Stats,
CreateDatasetView,
DatasetView,
CreateApiTokenView,
APITokenView,
TranscriptomeIndexList,
TranscriptomeIndexDetail,
QNTargetsDetail,
QNTargetsAvailable,
CompendiumResultList,
CompendiumResultDetails,
ComputedFilesList,
OriginalFileList,
AboutStats
)
# This provides _public_ access to the /admin interface!
# Enabling this by setting DEBUG to true this will allow unauthenticated access to the admin interface.
# Very useful for debugging (since we have no User accounts), but very dangerous for prod!
class AccessUser:
    """Stand-in user object whose every permission check succeeds.

    Any attribute access — named permission methods included — yields True,
    which is what lets the admin interface run without real User accounts.
    """

    def has_module_perms(self, *args, **kwargs):
        return True

    def has_perm(self, *args, **kwargs):
        return True

    def __getattr__(self, name, *args, **kwargs):
        return True
if settings.DEBUG:
    # DEBUG only: bypass the admin permission check by attaching the
    # allow-everything AccessUser to every request before returning True.
    admin.site.has_permission = lambda r: setattr(r, 'user', AccessUser()) or True
# drf-yasg schema view that backs the swagger-ui and ReDoc documentation
# endpoints registered below; publicly accessible with no auth required.
schema_view = get_schema_view(
    openapi.Info(
        title="Refine.bio API",
        default_version='v1',
        description="""
refine.bio is a multi-organism collection of genome-wide transcriptome or gene expression data that has been obtained from publicly available repositories and uniformly processed and normalized. refine.bio allows biologists, clinicians, and machine learning researchers to search for experiments from different source repositories all in one place and build custom data sets for their questions of interest.
The swagger-ui view can be found [here](http://api.refine.bio/swagger/).
The ReDoc view can be found [here](http://api.refine.bio/).
Additional documentation can be found at [docs.refine.bio](http://docs.refine.bio/en/latest/).
### Questions/Feedback?
If you have a question or comment, please [file an issue on GitHub](https://github.com/AlexsLemonade/refinebio/issues) or send us an email at [ccdl@alexslemonade.org](mailto:ccdl@alexslemonade.org).
""",
        terms_of_service="https://www.refine.bio/terms",
        contact=openapi.Contact(email="ccdl@alexslemonade.org"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
# All API endpoints live under a /v1/ prefix; the bare /swagger/ and / roots
# redirect to their versioned equivalents below.
urlpatterns = [
    url(r'^(?P<version>v1)/', include([
        # Primary search and filter interface
        url(r'^search/$', ExperimentDocumentView.as_view({'get': 'list'}), name='search'),

        url(r'^experiments/$', ExperimentList.as_view(), name='experiments'),
        url(r'^experiments/(?P<accession_code>.+)/$', ExperimentDetail.as_view(), name='experiments_detail'),
        url(r'^samples/$', SampleList.as_view(), name='samples'),
        url(r'^samples/(?P<accession_code>.+)/$', SampleDetail.as_view(), name='samples_detail'),
        url(r'^organisms/$', OrganismList.as_view(), name='organisms'),
        url(r'^platforms/$', PlatformList.as_view(), name='platforms'),
        url(r'^institutions/$', InstitutionList.as_view(), name='institutions'),
        url(r'^processors/$', ProcessorList.as_view(), name='processors'),

        # Deliverables
        url(r'^dataset/$', CreateDatasetView.as_view(), name='create_dataset'),
        url(r'^dataset/(?P<id>[0-9a-f-]+)/$', DatasetView.as_view(), name='dataset'),
        url(r'^token/$', CreateApiTokenView.as_view(), name='token'),
        url(r'^token/(?P<id>[0-9a-f-]+)/$', APITokenView.as_view(), name='token_id'),

        # Jobs
        url(r'^jobs/survey/$', SurveyJobList.as_view(), name='survey_jobs'),
        url(r'^jobs/downloader/$', DownloaderJobList.as_view(), name='downloader_jobs'),
        url(r'^jobs/processor/$', ProcessorJobList.as_view(), name='processor_jobs'),

        # Dashboard Driver
        url(r'^stats/$', Stats.as_view(), name='stats'),
        url(r'^stats-about/$', AboutStats.as_view(), name='stats_about'),

        # Transcriptome Indices and QN Targets
        url(r'^transcriptome_indices/$', TranscriptomeIndexList.as_view(), name='transcriptome_indices'),
        url(r'^transcriptome_indices/(?P<organism_name>.+)$', TranscriptomeIndexDetail.as_view(), name='transcriptome_indices_read'),
        url(r'^qn_targets/$', QNTargetsAvailable.as_view(), name='qn_targets_available'),
        url(r'^qn_targets/(?P<organism_name>.+)$', QNTargetsDetail.as_view(), name='qn_targets'),

        # Computed Files
        url(r'^computed_files/$', ComputedFilesList.as_view(), name='computed_files'),
        url(r'^original_files/$', OriginalFileList.as_view(), name='original_files'),
        url(r'^computational_results/$', ComputationalResultsList.as_view(), name='results'),

        # Compendia
        url(r'^compendia/$', CompendiumResultList.as_view(), name='compendium_results'),
        url(r'^compendia/(?P<id>[0-9]+)/$', CompendiumResultDetails.as_view(), name='compendium_result'),

        # v1 api docs
        url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema_swagger_ui'),
        url(r'^$', schema_view.with_ui('redoc', cache_timeout=0), name='schema_redoc'),
    ])),

    # Admin
    url(r'^admin/', admin.site.urls),

    # Redirect root urls to latest version api docs
    url(r'^swagger/$', RedirectView.as_view(url="/v1/swagger")),
    url(r'^$', RedirectView.as_view(url="/v1")),
]

# This adds support explicitly typed endpoints such that appending '.json' returns that application type.
urlpatterns = format_suffix_patterns(urlpatterns)
<|code_end|>
api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField, OuterRef, Subquery
from django.db.models.functions import Trunc
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_TERMS,
LOOKUP_FILTER_RANGE,
LOOKUP_FILTER_PREFIX,
LOOKUP_FILTER_WILDCARD,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
LOOKUP_QUERY_EXCLUDE,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend
)
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from elasticsearch_dsl import TermsFacet, DateHistogramFacet
from rest_framework import status, filters, generics, mixins
from rest_framework.exceptions import APIException, NotFound
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
SampleSerializer,
CompendiumResultSerializer,
CompendiumResultWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
OriginalFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import (
ExperimentDocument
)
from data_refinery_common.utils import (
get_env_variable,
get_active_volumes,
get_nomad_jobs_breakdown,
get_nomad_jobs
)
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
logger = get_and_configure_logger(__name__)
##
# Variables
##
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
##
# ElasticSearch
##
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    """Faceted-search backend that attaches a sample-count metric to each bucket."""

    def aggregate(self, request, queryset, view):
        """Add a ``total_samples`` metric to every facet bucket.

        Extends FacetedSearchFilterBackend (see its `aggregate` in
        django-elasticsearch-dsl-drf). The downloadable sample accession codes
        are indexed for each experiment, so a cardinality metric gives the
        number of unique samples per bucket. Cardinality counts are only
        approximate in ElasticSearch; the highest possible precision threshold
        is used here, which may increase memory usage.
        """
        for field_name, facet_conf in self.construct_facets(request, view).items():
            aggregation = facet_conf['facet'].get_aggregation()
            bucket = queryset.aggs.bucket(field_name, aggregation)
            bucket.metric('total_samples', 'cardinality',
                          field='downloadable_samples',
                          precision_threshold=40000)
        return queryset
##
# ElasticSearch powered Search and Filter
##
@method_decorator(name='list', decorator=swagger_auto_schema(
    manual_parameters=[
        openapi.Parameter(
            name='technology', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
        ),
        openapi.Parameter(
            name='has_publication', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Filter the results that have associated publications with `?has_publication=true`",
        ),
        openapi.Parameter(
            name='platform', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by platform, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='organism', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by organism, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='num_processed_samples', in_=openapi.IN_QUERY,
            type=openapi.TYPE_NUMBER,
            description="Use ElasticSearch queries to specify the number of processed samples of the results",
        ),
    ],
    operation_description="""
Use this endpoint to search among the experiments.
This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)
There's an additional field in the response named `facets` that contain stats on the number of results per filter type.
Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
"""))
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """
    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended
    ]

    # Primitive
    lookup_field = 'id'

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        'title': {'boost': 10},
        'publication_authors': {'boost': 8},  # "People will search themselves"
        'publication_title': {'boost': 5},
        'submitter_institution': {'boost': 3},
        'description': {'boost': 2},
        'accession_code': None,
        'alternate_accession_code': None,
        'publication_doi': None,
        'pubmed_id': None,
        'sample_metadata_fields': None,
        'platform_names': None
    }

    # Define filtering fields
    filter_fields = {
        'id': {
            'field': '_id',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN
            ],
        },
        'technology': 'technology',
        'has_publication': 'has_publication',
        'platform': 'platform_accession_codes',
        'organism': 'organism_names',
        'num_processed_samples': {
            'field': 'num_processed_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        },
        'num_downloadable_samples': {
            'field': 'num_downloadable_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        }
    }

    # Define ordering fields
    ordering_fields = {
        'id': 'id',
        'title': 'title.raw',
        'description': 'description.raw',
        'num_total_samples': 'num_total_samples',
        'num_downloadable_samples': 'num_downloadable_samples',
        'source_first_published': 'source_first_published'
    }

    # Specify default ordering
    ordering = ('_score', '-num_total_samples', 'id', 'title', 'description', '-source_first_published')

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        'technology': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': True  # These are enabled by default, which is more expensive but more simple.
        },
        'organism_names': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': True,
            'options': {
                'size': 999999
            }
        },
        'platform_accession_codes': {
            'field': 'platform_accession_codes',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
            'options': {
                'size': 999999
            }
        },
        'has_publication': {
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        'technology_global': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': False,
            'global': True
        },
        'organism_names_global': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'platform_names_global': {
            'field': 'platform_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'has_publication_global': {
            # BUGFIX: this facet previously aggregated on 'platform_names',
            # which was a copy/paste error from 'platform_names_global'.
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
        },
    }
    faceted_search_param = 'facet'

    def list(self, request, *args, **kwargs):
        """List matching experiments and attach per-filter facet counts."""
        # BUGFIX: previously `args` and `kwargs` were passed as two positional
        # arguments instead of being unpacked.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data['facets'] = self.transform_es_facets(response.data['facets'])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:
        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }
        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet['buckets']:
                # Boolean buckets expose their value under 'key_as_string'.
                if field == 'has_publication':
                    filter_group[bucket['key_as_string']] = bucket['total_samples']['value']
                else:
                    filter_group[bucket['key']] = bucket['total_samples']['value']
            result[field] = filter_group
        return result
##
# Dataset
##
class CreateDatasetView(generics.CreateAPIView):
    """Create a new, empty Dataset and return it (POST only)."""
    serializer_class = CreateDatasetSerializer
    queryset = Dataset.objects.all()
@method_decorator(name='get', decorator=swagger_auto_schema(operation_description="View a single Dataset.", manual_parameters=[
    openapi.Parameter(
        name='details', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
    )]))
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))  # partial updates not supported
@method_decorator(name='put', decorator=swagger_auto_schema(operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
    'data': data,
    'aggregate_by': 'EXPERIMENT',
    'start': True,
    'email_address': 'refinebio@gmail.com'
})
headers = {
    'Content-Type': 'application/json',
    'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""))
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = 'id'

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        # Internal/staff addresses are excluded from EngagementBot pings.
        return email is not None \
            and email.find("cansav09") != 0 \
            and email.find("arielsvn") != 0 \
            and email.find("jaclyn.n.taroni") != 0 \
            and email.find("kurt.wheeler") != 0 \
            and email.find("greenescientist") != 0 \
            and email.find("@alexslemonade.org") == -1 \
            and email.find("miserlou") != 0 \
            and email.find("michael.zietz@gmail.com") != 0 \
            and email.find("d.prasad") != 0 \
            and email.find("daniel.himmelstein@gmail.com") != 0 \
            and email.find("dv.prasad991@gmail.com") != 0

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        qn_organisms = Organism.get_objects_with_qn_targets()

        # We convert 'ALL' into the actual accession codes given
        for key in new_data['data'].keys():
            accessions = new_data['data'][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(experiment.samples.filter(is_processed=True, organism__in=qn_organisms).values_list('accession_code', flat=True))
                new_data['data'][key] = sample_codes

        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")

        if new_data.get('start'):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get('token_id', None)
            if not token_id:
                token_id = self.request.META.get('HTTP_API_KEY', None)
            try:
                APIToken.objects.get(id=token_id, is_activated=True)
            except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")

            supplied_email_address = self.request.data.get('email_address', None)
            email_ccdl_ok = self.request.data.get('email_ccdl_ok', False)

            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                obj = serializer.save()
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get('no_send_job', False):
                        # NOTE: send_job is responsible for recording the
                        # job's nomad_job_id once dispatched.
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException("Unable to queue download job. Something has gone"
                                       " wrong and we have been notified about it.")

                serializer.validated_data['is_processing'] = True
                obj = serializer.save()

                if settings.RUNNING_IN_CLOUD and settings.ENGAGEMENTBOT_WEBHOOK is not None \
                        and DatasetView._should_display_on_engagement_bot(supplied_email_address):
                    try:
                        # BUGFIX: `remote_ip` was previously only assigned
                        # inside the inner try, so a failure inside
                        # `get_client_ip` left it unbound and the message
                        # construction below raised NameError.
                        remote_ip = "COULD_NOT_DETERMINE"
                        city = "COULD_NOT_DETERMINE"
                        try:
                            remote_ip = get_client_ip(self.request)
                            city = requests.get('https://ipapi.co/' + remote_ip + '/json/', timeout=10).json()['city']
                        except Exception:
                            pass

                        new_user_text = "New user " + supplied_email_address + " from " + city + " [" + remote_ip + "] downloaded a dataset! (" + str(old_object.id) + ")"
                        webhook_url = settings.ENGAGEMENTBOT_WEBHOOK
                        slack_json = {
                            "channel": "ccdl-general",  # Move to robots when we get sick of these
                            "username": "EngagementBot",
                            "icon_emoji": ":halal:",
                            "attachments": [
                                {"color": "good",
                                 "text": new_user_text
                                 }
                            ]
                        }
                        response = requests.post(
                            webhook_url,
                            json=slack_json,
                            headers={'Content-Type': 'application/json'},
                            timeout=10
                        )
                    except Exception as e:
                        # It doesn't really matter if this didn't work
                        logger.error(e)
                        pass

                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data['data'] = old_data
            serializer.validated_data['aggregate_by'] = old_aggregate
        serializer.save()
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create
    This endpoint can be used to create and activate tokens. These tokens can be used
    in requests that provide urls to download computed files. They are a way to accept
    our terms of service.
    ```py
    import requests
    import json
    response = requests.post('https://api.refine.bio/v1/token/')
    token_id = response.json()['id']
    response = requests.put('https://api.refine.bio/v1/token/' + token_id + '/', json.dumps({'is_activated': True}), headers={'Content-Type': 'application/json'})
    ```
    The token id needs to be provided in the HTTP request in the API-KEY header.
    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """
    # POST-only view; tokens are activated afterwards via APITokenView (PUT).
    model = APIToken
    serializer_class = APITokenSerializer
# PATCH is hidden from the generated API docs; clients activate via PUT.
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify Api Tokens.
    get:
    Return details about a specific token.
    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """
    model = APIToken
    # Tokens are addressed by their UUID primary key, not by numeric pk.
    lookup_field = 'id'
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##
class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """
    model = Experiment
    # Only publicly-visible experiments are listed.
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    # Exact-match query parameters, e.g. `?accession_code=GSE12345`.
    filterset_fields = (
        'title',
        'description',
        'accession_code',
        'alternate_accession_code',
        'source_database',
        'source_url',
        'has_publication',
        'publication_title',
        'publication_doi',
        'pubmed_id',
        'organisms',
        'submitter_institution',
        'created_at',
        'last_modified',
        'source_first_published',
        'source_last_modified',
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given it's accession code """
    # Looked up by accession code (e.g. GSE12345) rather than numeric pk.
    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='dataset_id', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns samples that are added to a dataset.",
    ),
    openapi.Parameter(
        name='experiment_accession_code', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns only the samples associated with an experiment accession code.",
    ),
    openapi.Parameter(
        name='accession_codes', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
    ),
]))
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """
    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = '__all__'
    # BUGFIX: this was `('-is_processed')` -- a parenthesized string, not a
    # tuple. DRF tolerates a bare string, but the one-element tuple is what
    # was intended and matches the sibling list views.
    ordering = ('-is_processed',)
    filterset_fields = (
        'title',
        'organism',
        'source_database',
        'source_archive_url',
        'has_raw',
        'platform_name',
        'technology',
        'manufacturer',
        'sex',
        'age',
        'specimen_part',
        'genotype',
        'disease',
        'disease_stage',
        'cell_line',
        'treatment',
        'race',
        'subject',
        'compound',
        'time',
        'is_processed',
        'is_public'
    )

    def get_queryset(self):
        """
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        # Prefetch everything the detailed serializer touches to avoid N+1 queries.
        queryset = Sample.public_objects \
            .prefetch_related('organism') \
            .prefetch_related('results') \
            .prefetch_related('results__processor') \
            .prefetch_related('results__computationalresultannotation_set') \
            .prefetch_related('results__computedfile_set') \
            .filter(**self.get_query_params_filters())

        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get('filter_by', None)
        if filter_by:
            queryset = queryset.filter(Q(accession_code__icontains=filter_by) |
                                       Q(title__icontains=filter_by) |
                                       Q(sex__icontains=filter_by) |
                                       Q(age__icontains=filter_by) |
                                       Q(specimen_part__icontains=filter_by) |
                                       Q(genotype__icontains=filter_by) |
                                       Q(disease__icontains=filter_by) |
                                       Q(disease_stage__icontains=filter_by) |
                                       Q(cell_line__icontains=filter_by) |
                                       Q(treatment__icontains=filter_by) |
                                       Q(race__icontains=filter_by) |
                                       Q(subject__icontains=filter_by) |
                                       Q(compound__icontains=filter_by) |
                                       Q(time__icontains=filter_by))

        return queryset

    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()

        ids = self.request.query_params.get('ids', None)
        if ids is not None:
            ids = [int(x) for x in ids.split(',')]
            filter_dict['pk__in'] = ids

        experiment_accession_code = self.request.query_params.get('experiment_accession_code', None)
        if experiment_accession_code:
            experiment = get_object_or_404(Experiment.objects.values('id'), accession_code=experiment_accession_code)
            filter_dict['experiments__in'] = [experiment['id']]

        accession_codes = self.request.query_params.get('accession_codes', None)
        if accession_codes:
            accession_codes = accession_codes.split(',')
            filter_dict['accession_code__in'] = accession_codes

        dataset_id = self.request.query_params.get('dataset_id', None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            filter_dict['accession_code__in'] = [item for sublist in dataset.data.values() for item in sublist]

        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get('organism__name', None)
        if organism_name:
            filter_dict['organism__name'] = organism_name

        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given it's accession code """
    # Looked up by accession code rather than numeric pk.
    lookup_field = "accession_code"
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##
class ProcessorList(generics.ListAPIView):
    """List all processors."""
    # Processors are few; no filtering or custom ordering is needed here.
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##
class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list
    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).
    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """
    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        # Requests carrying an activated API token get the serializer that
        # includes signed S3 download URLs.
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return ComputationalResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer

    def filter_queryset(self, queryset):
        # NOTE(review): query params are forwarded to .filter() unvalidated;
        # an unknown field name raises FieldError (HTTP 500). Consider
        # whitelisting filterable fields.
        filter_dict = self.request.query_params.dict()
        filter_dict.pop('limit', None)
        filter_dict.pop('offset', None)
        return queryset.filter(**filter_dict)
##
# Search Filter Models
##
class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """
    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    # Disable pagination: the full organism list is small and consumed whole.
    paginator = None
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """
    serializer_class = PlatformSerializer
    # Disable pagination: returns the full distinct platform list.
    paginator = None

    def get_queryset(self):
        # Distinct (accession_code, name) pairs across all public samples.
        return Sample.public_objects.all().values("platform_accession_code", "platform_name").distinct()
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """
    serializer_class = InstitutionSerializer
    # Disable pagination: returns the full distinct institution list.
    paginator = None

    def get_queryset(self):
        # Distinct submitter institutions across all public experiments.
        return Experiment.public_objects.all().values("submitter_institution").distinct()
##
# Jobs
##
class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """
    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is an exact-match filter parameter.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='sample_accession_code', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='List the downloader jobs associated with a sample',
    ),
    openapi.Parameter(
        name='nomad', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='Only return jobs that are in the nomad queue currently',
    ),
]))
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """
    model = DownloaderJob
    serializer_class = DownloaderJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)

    def get_queryset(self):
        queryset = DownloaderJob.objects.all()

        sample_accession_code = self.request.query_params.get('sample_accession_code', None)
        if sample_accession_code:
            queryset = queryset.filter(original_files__samples__accession_code=sample_accession_code).distinct()

        nomad = self.request.query_params.get('nomad', None)
        # NOTE(review): any non-empty value (including "false") enables this
        # filter, since the raw query-param string is only truth-tested.
        if nomad:
            # Restrict to jobs whose nomad_job_id is currently known to Nomad.
            nomad_jobs_ids = [job['ID'] for job in get_nomad_jobs()]
            queryset = queryset.filter(nomad_job_id__in=nomad_jobs_ids)

        return queryset
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='sample_accession_code', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='List the processor jobs associated with a sample',
    ),
    openapi.Parameter(
        name='nomad', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='Only return jobs that are in the nomad queue currently',
    ),
]))
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """
    model = ProcessorJob
    serializer_class = ProcessorJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    # Newest jobs first by default.
    ordering = ('-id',)

    def get_queryset(self):
        queryset = ProcessorJob.objects.all()

        sample_accession_code = self.request.query_params.get('sample_accession_code', None)
        if sample_accession_code:
            queryset = queryset.filter(original_files__samples__accession_code=sample_accession_code).distinct()

        nomad = self.request.query_params.get('nomad', None)
        # NOTE(review): any non-empty value (including "false") enables this
        # filter, since the raw query-param string is only truth-tested.
        # Note that the ?nomad filter only works for jobs that have their
        # nomad_job_id set -- e.g. COMPENDIA jobs dispatched without one will
        # never match it.
        if nomad:
            nomad_jobs_ids = [job['ID'] for job in get_nomad_jobs()]
            queryset = queryset.filter(nomad_job_id__in=nomad_jobs_ids)

        return queryset
###
# Statistics
###
class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """

    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        # static values for now
        dummy = request.query_params.dict().pop('dummy', None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                'samples_available': 904953 + 391022,
                'total_size_in_bytes': 832195361132962,
                'supported_organisms': 43 + 159,
                'experiments_processed': 35785 + 8661
            }
            return Response(result)

        result = {
            'samples_available': self._get_samples_available(),
            'total_size_in_bytes': OriginalFile.objects.aggregate(total_size=Sum('size_in_bytes'))['total_size'],
            'supported_organisms': self._get_supported_organisms(),
            'experiments_processed': self._get_experiments_processed()
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        # BUGFIX: this used `processed_samples_count__gt=1`, which excluded
        # experiments with exactly one processed sample and so contradicted
        # the "at least one" definition above.
        experiments_with_sample_processed = Experiment.objects\
            .annotate(
                processed_samples_count=Count('samples', filter=Q(samples__is_processed=True)),
            )\
            .filter(Q(processed_samples_count__gte=1))\
            .count()
        # Experiments whose only contribution is quant.sf files from
        # otherwise-unprocessed samples.
        experiments_with_sample_quant = ComputedFile.objects\
            .filter(filename='quant.sf', result__samples__is_processed=False)\
            .values_list('result__samples__experiments', flat=True)\
            .distinct()\
            .count()
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        # Organisms without a QN target still count when they have
        # quant.sf-only (unprocessed RNA-SEQ) samples.
        organisms_without_qn_targets = Organism.objects.filter(
            qn_target__isnull=True,
            sample__is_processed=False,
            sample__technology='RNA-SEQ',
            sample__results__computedfile__filename='quant.sf'
        ).distinct().count()
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = Sample.objects.filter(
            is_processed=False,
            technology='RNA-SEQ',
            results__computedfile__filename='quant.sf'
        ).distinct().count()
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """

    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        range_param = request.query_params.dict().pop('range', None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)

    @classmethod
    def calculate_stats(cls, range_param):
        """Assemble the statistics dict served by this endpoint.

        `range_param` is one of day/week/month/year (or None); when given,
        per-interval timelines and data-size totals are included.
        """
        data = {}
        data['generated_on'] = timezone.now()
        data['survey_jobs'] = cls._get_job_stats(SurveyJob.objects, range_param)
        data['downloader_jobs'] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data['processor_jobs'] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data['experiments'] = cls._get_object_stats(Experiment.objects, range_param)

        # processed and unprocessed samples stats
        data['unprocessed_samples'] = cls._get_object_stats(Sample.objects.filter(is_processed=False), range_param, 'last_modified')
        data['processed_samples'] = cls._get_object_stats(Sample.processed_objects, range_param, 'last_modified')
        data['processed_samples']['last_hour'] = cls._samples_processed_last_hour()

        # Per-technology breakdown of processed samples.
        data['processed_samples']['technology'] = {}
        techs = Sample.processed_objects.values('technology').annotate(count=Count('technology'))
        for tech in techs:
            if not tech['technology'] or not tech['technology'].strip():
                continue
            data['processed_samples']['technology'][tech['technology']] = tech['count']

        # Per-organism breakdown of processed samples.
        data['processed_samples']['organism'] = {}
        organisms = Sample.processed_objects.values('organism__name').annotate(count=Count('organism__name'))
        for organism in organisms:
            if not organism['organism__name']:
                continue
            data['processed_samples']['organism'][organism['organism__name']] = organism['count']

        data['processed_experiments'] = cls._get_object_stats(Experiment.processed_public_objects)
        data['active_volumes'] = list(get_active_volumes())
        data['dataset'] = cls._get_dataset_stats(range_param)

        if range_param:
            data['input_data_size'] = cls._get_input_data_size()
            data['output_data_size'] = cls._get_output_data_size()

        data.update(get_nomad_jobs_breakdown())

        return data

    # Addresses matching these usernames (or @alexslemonade.org) are
    # excluded from the dataset stats below.
    EMAIL_USERNAME_BLACKLIST = [
        'arielsvn',
        'cansav09',
        'd.prasad',
        'daniel.himmelstein',
        'dv.prasad991',
        'greenescientist',
        'jaclyn.n.taroni',
        'kurt.wheeler91',
        'michael.zietz',
        'miserlou'
    ]

    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        filter_query = Q()
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith='@alexslemonade.org')
        processed_datasets = Dataset.objects.filter(is_processed=True, email_address__isnull=False).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count('id'),
            aggregated_by_experiment=Count('id', filter=Q(aggregate_by='EXPERIMENT')),
            aggregated_by_species=Count('id', filter=Q(aggregate_by='SPECIES')),
            scale_by_none=Count('id', filter=Q(scale_by='NONE')),
            scale_by_minmax=Count('id', filter=Q(scale_by='MINMAX')),
            scale_by_standard=Count('id', filter=Q(scale_by='STANDARD')),
            scale_by_robust=Count('id', filter=Q(scale_by='ROBUST')),
        )

        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result['timeline'] = cls._get_intervals(
                processed_datasets,
                range_param,
                'last_modified'
            ).annotate(
                total=Count('id'),
                total_size=Sum('size_in_bytes')
            )
        return result

    @classmethod
    def _samples_processed_last_hour(cls):
        """Count samples whose last_modified falls within the past hour."""
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()

    @classmethod
    def _get_input_data_size(cls):
        """Total size in bytes of original files belonging to processed samples."""
        total_size = OriginalFile.objects.filter(
            sample__is_processed=True  # <-- SLOW
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_output_data_size(cls):
        """Total size in bytes of public computed files stored on S3."""
        # BUGFIX: this previously filtered `s3_key__isnull=True`, which
        # matched files with a bucket but *no* key (i.e. never fully
        # uploaded) instead of the files actually stored on S3.
        total_size = ComputedFile.public_objects.all().filter(
            s3_bucket__isnull=False,
            s3_key__isnull=False
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate total/successful/failed/pending/open counts for a job queryset."""
        start_filter = Q()
        if range_param:
            current_date = datetime.now(tz=timezone.utc)
            start_date = {
                'day': current_date - timedelta(days=1),
                'week': current_date - timedelta(weeks=1),
                'month': current_date - timedelta(days=30),
                'year': current_date - timedelta(days=365)
            }.get(range_param)
            # Jobs that never started (start_time null) are always included.
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)

        result = jobs.filter(start_filter).aggregate(
            total=Count('id'),
            successful=Count('id', filter=Q(success=True)),
            failed=Count('id', filter=Q(success=False)),
            pending=Count('id', filter=Q(start_time__isnull=True,
                                         success__isnull=True,
                                         created_at__gt=JOB_CREATED_AT_CUTOFF)),
            open=Count('id', filter=Q(start_time__isnull=False,
                                      success__isnull=True,
                                      created_at__gt=JOB_CREATED_AT_CUTOFF)),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result['average_time'] = jobs.filter(start_filter).filter(
            start_time__isnull=False,
            end_time__isnull=False,
            success=True
        ).aggregate(
            average_time=Avg(F('end_time') - F('start_time'))
        )['average_time']

        if not result['average_time']:
            result['average_time'] = 0
        else:
            result['average_time'] = result['average_time'].total_seconds()

        if range_param:
            result['timeline'] = cls._get_intervals(jobs, range_param) \
                .annotate(
                    total=Count('id'),
                    successful=Count('id', filter=Q(success=True)),
                    failed=Count('id', filter=Q(success=False)),
                    pending=Count('id', filter=Q(start_time__isnull=True, success__isnull=True)),
                    open=Count('id', filter=Q(start_time__isnull=False, success__isnull=True)),
                )

        return result

    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field='created_at'):
        """Total count for a queryset, plus a per-interval timeline when requested."""
        result = {
            'total': objects.count()
        }

        if range_param:
            result['timeline'] = cls._get_intervals(objects, range_param, field)\
                .annotate(total=Count('id'))

        return result

    @classmethod
    def _get_intervals(cls, objects, range_param, field='last_modified'):
        """Bucket a queryset into hour/day/month intervals covering `range_param`."""
        range_to_trunc = {
            'day': 'hour',
            'week': 'day',
            'month': 'day',
            'year': 'month'
        }

        current_date = datetime.now(tz=timezone.utc)
        range_to_start_date = {
            'day': current_date - timedelta(days=1),
            'week': current_date - timedelta(weeks=1),
            'month': current_date - timedelta(days=30),
            'year': current_date - timedelta(days=365)
        }

        # truncate the `last_modified` field by hour, day or month depending on the `range` param
        # and annotate each object with that. This will allow us to count the number of objects
        # on each interval with a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())) \
                      .values('start') \
                      .filter(start__gte=range_to_start_date.get(range_param))
###
# Transcriptome Indices
###
class TranscriptomeIndexList(generics.ListAPIView):
    """ List all Transcriptome Indices. These are a special type of process result, necessary for processing other SRA samples. """
    serializer_class = OrganismIndexSerializer

    def get_queryset(self):
        # One row per (organism, index_type) combination.
        return OrganismIndex.objects.distinct("organism", "index_type")
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Organism name. Eg. `MUS_MUSCULUS`",
    ),
    openapi.Parameter(
        name='length', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="",
        enum=('short', 'long',),
        default='short'
    ),
]))
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """
    serializer_class = OrganismIndexSerializer

    def get_object(self):
        """Return the OrganismIndex for the requested organism and length.

        Raises Http404 when either the organism or a matching index is missing.
        """
        organism_name = self.kwargs['organism_name'].upper()
        length = self.request.query_params.get('length', 'short')

        # Get the correct organism index object, serialize it, and return it
        transcription_length = "TRANSCRIPTOME_" + length.upper()
        try:
            # `organism_name` is already upper-cased above (the old code
            # redundantly upper-cased it a second time here).
            organism = Organism.objects.get(name=organism_name)
            organism_index = OrganismIndex.objects.exclude(s3_url__exact="")\
                .distinct("organism", "index_type")\
                .get(organism=organism, index_type=transcription_length)
            return organism_index
        # BUGFIX: Organism.DoesNotExist was previously uncaught, turning an
        # unknown organism into an HTTP 500 instead of a 404.
        except (Organism.DoesNotExist, OrganismIndex.DoesNotExist):
            raise Http404('Organism does not exist')
###
# Compendia
###


@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='latest_version', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="`True` will only return the highest `compendium_version` for each primary_organism.",
    ),
    openapi.Parameter(
        name='quant_sf_only', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="`True` for RNA-seq Sample Compendium results or `False` for quantile normalized.",
    ),
]))
class CompendiumResultList(generics.ListAPIView):
    """
    List all CompendiaResults with filtering.
    """
    model = CompendiumResult
    queryset = CompendiumResult.objects.all()
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = ['primary_organism__name', 'compendium_version', 'quant_sf_only']
    ordering_fields = ('primary_organism__name', 'compendium_version', 'id')
    ordering = ('-primary_organism__name',)

    def get_queryset(self):
        # Only expose compendia whose underlying computational result is public.
        public_result_queryset = CompendiumResult.objects.filter(result__is_public=True)
        latest_version = self.request.query_params.get('latest_version', False)
        if latest_version:
            # Correlated subquery: for each row, look up the highest
            # compendium_version among compendia sharing the same primary
            # organism and quant_sf_only flag, then keep only rows whose
            # version matches it.
            # NOTE(review): `latest_version` is reused for the subquery here,
            # shadowing the query-param value read above.
            version_filter = Q(primary_organism=OuterRef('primary_organism'),
                               quant_sf_only=OuterRef('quant_sf_only'))
            latest_version = CompendiumResult.objects.filter(version_filter)\
                .order_by('-compendium_version')\
                .values('compendium_version')
            return public_result_queryset.annotate(
                latest_version=Subquery(latest_version[:1])
            ).filter(compendium_version=F('latest_version'))
        return public_result_queryset

    def get_serializer_class(self):
        # A valid, activated API token (API-KEY header) unlocks download URLs.
        try:
            token_id = self.request.META.get('HTTP_API_KEY', None)
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return CompendiumResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return CompendiumResultSerializer
class CompendiumResultDetails(generics.RetrieveAPIView):
    """
    Get a specific Compendium Result
    """
    model = CompendiumResult
    # NOTE(review): the list view filters on `result__is_public`; confirm that
    # CompendiumResult really has its own `is_public` field and this is not a
    # typo for `result__is_public`.
    queryset = CompendiumResult.objects.filter(is_public=True)
    lookup_field = 'id'

    def get_serializer_class(self):
        # A valid, activated API token (API-KEY header) unlocks download URLs.
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return CompendiumResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return CompendiumResultSerializer
###
# QN Targets
###


class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """
    serializer_class = OrganismSerializer
    # Return the whole list in one response; pagination is disabled.
    paginator = None

    def get_queryset(self):
        return Organism.get_objects_with_qn_targets()
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
    )], responses={404: 'QN Target not found for the given organism.'}))
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """
    serializer_class = QNTargetSerializer

    def get_object(self):
        """Return the newest QN target ComputedFile for the organism, or 404."""
        organism = self.kwargs['organism_name']
        # Normalize "Homo sapiens"-style names to "HOMO_SAPIENS".
        organism = organism.upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(organism).id
            # Newest QN annotation for this organism.
            annotation = ComputationalResultAnnotation.objects.filter(
                data__organism_id=organism_id,
                data__is_qn=True
            ).order_by(
                '-created_at'
            ).first()
            # `annotation` may be None when no QN result exists; previously
            # that raised an AttributeError that the broad except masked.
            # Make the "no target" path explicit instead.
            qn_target = annotation.result.computedfile_set.first() if annotation else None
        except Exception:
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##


class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list

    ComputedFiles are representation of files created by data-refinery processes.

    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """
    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = (
        'id',
        'samples',
        'is_qn_target',
        'is_smashable',
        'is_qc',
        'is_compendia',
        'quant_sf_only',
        'svd_algorithm',
        'compendia_version',
        'created_at',
        'last_modified',
    )
    ordering_fields = ('id', 'created_at', 'last_modified', 'compendia_version',)
    ordering = ('-id',)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        serializer_context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            # An activated token in the serializer context unlocks download URLs.
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context
class OriginalFileList(generics.ListAPIView):
    """
    original_files_list

    List Original Files that are associated with Samples. These are the files we process.
    """
    queryset = OriginalFile.objects.all()
    serializer_class = OriginalFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Filterable on every field the list serializer exposes.
    filterset_fields = OriginalFileListSerializer.Meta.fields
    ordering_fields = ('id', 'created_at', 'last_modified',)
    ordering = ('-id',)
##
# Util
##


def get_client_ip(request):
    """Best-effort client IP for a request.

    Behind a proxy/load balancer the client is the first hop of the
    X-Forwarded-For header ("client, proxy1, proxy2, ..."); otherwise fall
    back to REMOTE_ADDR (empty string when unavailable).
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        # Strip whitespace: hops are typically joined with ", ".
        ip = x_forwarded_for.split(',')[0].strip()
    else:
        ip = request.META.get('REMOTE_ADDR', '')
    return ip
<|code_end|>
| api/data_refinery_api/urls.py
<|code_start|>from rest_framework.routers import DefaultRouter
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from django.views.generic import RedirectView
from .views import (
ExperimentDocumentView,
ExperimentList,
ExperimentDetail,
SampleList,
SampleDetail,
OrganismList,
PlatformList,
InstitutionList,
SurveyJobList,
DownloaderJobList,
ProcessorJobList,
ComputationalResultsList,
ProcessorList,
Stats,
CreateDatasetView,
DatasetView,
CreateApiTokenView,
APITokenView,
TranscriptomeIndexList,
TranscriptomeIndexDetail,
QNTargetsDetail,
QNTargetsAvailable,
CompendiumResultList,
CompendiumResultDetails,
ComputedFilesList,
OriginalFileList,
AboutStats,
FailedDownloaderJobStats,
FailedProcessorJobStats
)
# This provides _public_ access to the /admin interface!
# Enabling this by setting DEBUG to true this will allow unauthenticated access to the admin interface.
# Very useful for debugging (since we have no User accounts), but very dangerous for prod!
class AccessUser:
    """Stand-in user object that answers True to every permission check.

    Used only under DEBUG to open the /admin interface without real accounts.
    """

    def has_module_perms(self, *args, **kwargs):
        return True

    def has_perm(self, *args, **kwargs):
        return True

    def __getattr__(self, name):
        # Any other attribute lookup also reads as "allowed".
        return True
if settings.DEBUG:
    # DEBUG only: bypass the admin permission check entirely by attaching an
    # allow-everything AccessUser to every request. Never enable in prod.
    admin.site.has_permission = lambda r: setattr(r, 'user', AccessUser()) or True
schema_view = get_schema_view(
openapi.Info(
title="Refine.bio API",
default_version='v1',
description="""
refine.bio is a multi-organism collection of genome-wide transcriptome or gene expression data that has been obtained from publicly available repositories and uniformly processed and normalized. refine.bio allows biologists, clinicians, and machine learning researchers to search for experiments from different source repositories all in one place and build custom data sets for their questions of interest.
The swagger-ui view can be found [here](http://api.refine.bio/swagger/).
The ReDoc view can be found [here](http://api.refine.bio/).
Additional documentation can be found at [docs.refine.bio](http://docs.refine.bio/en/latest/).
### Questions/Feedback?
If you have a question or comment, please [file an issue on GitHub](https://github.com/AlexsLemonade/refinebio/issues) or send us an email at [ccdl@alexslemonade.org](mailto:ccdl@alexslemonade.org).
""",
terms_of_service="https://www.refine.bio/terms",
contact=openapi.Contact(email="ccdl@alexslemonade.org"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
# All API routes are versioned under /v1/; the root redirects to the v1 docs.
urlpatterns = [
    url(r'^(?P<version>v1)/', include([
        # Primary search and filter interface
        url(r'^search/$', ExperimentDocumentView.as_view({'get': 'list'}), name='search'),

        url(r'^experiments/$', ExperimentList.as_view(), name='experiments'),
        url(r'^experiments/(?P<accession_code>.+)/$', ExperimentDetail.as_view(), name='experiments_detail'),
        url(r'^samples/$', SampleList.as_view(), name='samples'),
        url(r'^samples/(?P<accession_code>.+)/$', SampleDetail.as_view(), name='samples_detail'),
        url(r'^organisms/$', OrganismList.as_view(), name='organisms'),
        url(r'^platforms/$', PlatformList.as_view(), name='platforms'),
        url(r'^institutions/$', InstitutionList.as_view(), name='institutions'),
        url(r'^processors/$', ProcessorList.as_view(), name='processors'),

        # Deliverables
        url(r'^dataset/$', CreateDatasetView.as_view(), name='create_dataset'),
        url(r'^dataset/(?P<id>[0-9a-f-]+)/$', DatasetView.as_view(), name='dataset'),
        url(r'^token/$', CreateApiTokenView.as_view(), name='token'),
        url(r'^token/(?P<id>[0-9a-f-]+)/$', APITokenView.as_view(), name='token_id'),

        # Jobs
        url(r'^jobs/survey/$', SurveyJobList.as_view(), name='survey_jobs'),
        url(r'^jobs/downloader/$', DownloaderJobList.as_view(), name='downloader_jobs'),
        url(r'^jobs/processor/$', ProcessorJobList.as_view(), name='processor_jobs'),

        # Dashboard Driver
        url(r'^stats/$', Stats.as_view(), name='stats'),
        url(r'^stats/failures/downloader$', FailedDownloaderJobStats.as_view(), name='stats_failed_downloader'),
        url(r'^stats/failures/processor$', FailedProcessorJobStats.as_view(), name='stats_failed_processor'),
        url(r'^stats-about/$', AboutStats.as_view(), name='stats_about'),

        # Transcriptome Indices and QN Targets
        url(r'^transcriptome_indices/$', TranscriptomeIndexList.as_view(), name='transcriptome_indices'),
        url(r'^transcriptome_indices/(?P<organism_name>.+)$', TranscriptomeIndexDetail.as_view(), name='transcriptome_indices_read'),
        url(r'^qn_targets/$', QNTargetsAvailable.as_view(), name='qn_targets_available'),
        url(r'^qn_targets/(?P<organism_name>.+)$', QNTargetsDetail.as_view(), name='qn_targets'),

        # Computed Files
        url(r'^computed_files/$', ComputedFilesList.as_view(), name='computed_files'),

        url(r'^original_files/$', OriginalFileList.as_view(), name='original_files'),
        url(r'^computational_results/$', ComputationalResultsList.as_view(), name='results'),

        # Compendia
        url(r'^compendia/$', CompendiumResultList.as_view(), name='compendium_results'),
        url(r'^compendia/(?P<id>[0-9]+)/$', CompendiumResultDetails.as_view(), name='compendium_result'),

        # v1 api docs
        url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema_swagger_ui'),
        url(r'^$', schema_view.with_ui('redoc', cache_timeout=0), name='schema_redoc'),
    ])),

    # Admin
    url(r'^admin/', admin.site.urls),

    # Redirect root urls to latest version api docs
    url(r'^swagger/$', RedirectView.as_view(url="/v1/swagger")),
    url(r'^$', RedirectView.as_view(url="/v1")),
]

# This adds support explicitly typed endpoints such that appending '.json' returns that application type.
urlpatterns = format_suffix_patterns(urlpatterns)
<|code_end|>
api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField, OuterRef, Subquery
from django.db.models.functions import Trunc, Left
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_TERMS,
LOOKUP_FILTER_RANGE,
LOOKUP_FILTER_PREFIX,
LOOKUP_FILTER_WILDCARD,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_GTE,
LOOKUP_QUERY_LT,
LOOKUP_QUERY_LTE,
LOOKUP_QUERY_EXCLUDE,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend
)
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from elasticsearch_dsl import TermsFacet, DateHistogramFacet
from rest_framework import status, filters, generics, mixins
from rest_framework.exceptions import APIException, NotFound
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
SampleSerializer,
CompendiumResultSerializer,
CompendiumResultWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
OriginalFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import (
ExperimentDocument
)
from data_refinery_common.utils import (
get_env_variable,
get_active_volumes,
get_nomad_jobs_breakdown,
get_nomad_jobs
)
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
logger = get_and_configure_logger(__name__)

##
# Variables
##

# Jobs created before this date are ignored by some queries in this module —
# TODO(review): confirm the intended semantics of the cutoff.
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
##
# ElasticSearch
##
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    def aggregate(self, request, queryset, view):
        """Extends FacetedSearchFilterBackend to add additional metrics to each bucket
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19

        We have the downloadable sample accession codes indexed for each experiment.
        The cardinality metric, returns the number of unique samples for each bucket.
        However it's just an approximate
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        I used the highest possible precision threshold, but this might increase the amount
        of memory used.
        """
        for facet_name, facet_config in iteritems(self.construct_facets(request, view)):
            aggregation = facet_config['facet'].get_aggregation()
            bucket = queryset.aggs.bucket(facet_name, aggregation)
            bucket.metric('total_samples', 'cardinality',
                          field='downloadable_samples',
                          precision_threshold=40000)
        return queryset
##
# ElasticSearch powered Search and Filter
##


@method_decorator(name='list', decorator=swagger_auto_schema(
    manual_parameters=[
        openapi.Parameter(
            name='technology', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
        ),
        openapi.Parameter(
            name='has_publication', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
            description="Filter the results that have associated publications with `?has_publication=true`",
        ),
        openapi.Parameter(
            name='platform', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by platform, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='organism', in_=openapi.IN_QUERY,
            type=openapi.TYPE_STRING,
            description="Allows filtering the results by organism, this parameter can have multiple values.",
        ),
        openapi.Parameter(
            name='num_processed_samples', in_=openapi.IN_QUERY,
            type=openapi.TYPE_NUMBER,
            description="Use ElasticSearch queries to specify the number of processed samples of the results",
        ),
    ],
    operation_description="""
Use this endpoint to search among the experiments.

This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)

There's an additional field in the response named `facets` that contain stats on the number of results per filter type.

Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
"""))
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """
    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended
    ]

    # Primitive
    lookup_field = 'id'

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        'title': {'boost': 10},
        'publication_authors': {'boost': 8},  # "People will search themselves"
        'publication_title': {'boost': 5},
        'submitter_institution': {'boost': 3},
        'description': {'boost': 2},
        'accession_code': None,
        'alternate_accession_code': None,
        'publication_doi': None,
        'pubmed_id': None,
        'sample_metadata_fields': None,
        'platform_names': None
    }

    # Define filtering fields
    filter_fields = {
        'id': {
            'field': '_id',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN
            ],
        },
        'technology': 'technology',
        'has_publication': 'has_publication',
        'platform': 'platform_accession_codes',
        'organism': 'organism_names',
        'num_processed_samples': {
            'field': 'num_processed_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        },
        'num_downloadable_samples': {
            'field': 'num_downloadable_samples',
            'lookups': [
                LOOKUP_FILTER_RANGE,
                LOOKUP_QUERY_IN,
                LOOKUP_QUERY_GT
            ],
        }
    }

    # Define ordering fields
    ordering_fields = {
        'id': 'id',
        'title': 'title.raw',
        'description': 'description.raw',
        'num_total_samples': 'num_total_samples',
        'num_downloadable_samples': 'num_downloadable_samples',
        'source_first_published': 'source_first_published'
    }

    # Specify default ordering
    ordering = ('_score', '-num_total_samples', 'id', 'title', 'description', '-source_first_published')

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        'technology': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': True  # These are enabled by default, which is more expensive but more simple.
        },
        'organism_names': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': True,
            'options': {
                'size': 999999
            }
        },
        'platform_accession_codes': {
            'field': 'platform_accession_codes',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
            'options': {
                'size': 999999
            }
        },
        'has_publication': {
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': True,
            'global': False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        'technology_global': {
            'field': 'technology',
            'facet': TermsFacet,
            'enabled': False,
            'global': True
        },
        'organism_names_global': {
            'field': 'organism_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'platform_names_global': {
            'field': 'platform_names',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
            'options': {
                'size': 999999
            }
        },
        'has_publication_global': {
            # FIX: was 'platform_names' — a copy-paste from the facet above,
            # which made the global has_publication facet aggregate the wrong
            # field entirely.
            'field': 'has_publication',
            'facet': TermsFacet,
            'enabled': False,
            'global': True,
        },
    }
    faceted_search_param = 'facet'

    def list(self, request, *args, **kwargs):
        # FIX: forward *args/**kwargs properly; previously the tuple and dict
        # were passed as two positional arguments.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data['facets'] = self.transform_es_facets(response.data['facets'])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:

        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }

        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet['buckets']:
                # Boolean facets expose their key as a string under `key_as_string`.
                if field == 'has_publication':
                    filter_group[bucket['key_as_string']] = bucket['total_samples']['value']
                else:
                    filter_group[bucket['key']] = bucket['total_samples']['value']
            result[field] = filter_group
        return result
##
# Dataset
##


class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Datasets. """
    # Creation only; reading/updating/dispatching a dataset happens in DatasetView.
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
@method_decorator(name='get', decorator=swagger_auto_schema(operation_description="View a single Dataset.",manual_parameters=[
    openapi.Parameter(
        name='details', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
    )]))
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))  # partial updates not supported
@method_decorator(name='put', decorator=swagger_auto_schema(operation_description="""
Modify an existing Dataset.

In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.

```py
import requests
import json

params = json.dumps({
    'data': data,
    'aggregate_by': 'EXPERIMENT',
    'start': True,
    'email_address': 'refinebio@gmail.com'
})
headers = {
    'Content-Type': 'application/json',
    'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""))
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = 'id'

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        # Filter out team members and known internal addresses so the Slack
        # engagement bot only announces real external users.
        return email is not None \
            and email.find("cansav09") != 0 \
            and email.find("arielsvn") != 0 \
            and email.find("jaclyn.n.taroni") != 0 \
            and email.find("kurt.wheeler") != 0 \
            and email.find("greenescientist") != 0 \
            and email.find("@alexslemonade.org") == -1 \
            and email.find("miserlou") != 0 \
            and email.find("michael.zietz@gmail.com") != 0 \
            and email.find("d.prasad") != 0 \
            and email.find("daniel.himmelstein@gmail.com") != 0 \
            and email.find("dv.prasad991@gmail.com") != 0

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            # An activated token in the context unlocks download URLs.
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        # Snapshot the pre-update state; used below to freeze data once processing starts.
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        qn_organisms = Organism.get_objects_with_qn_targets()

        # We convert 'ALL' into the actual accession codes given
        for key in new_data['data'].keys():
            accessions = new_data['data'][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(experiment.samples.filter(is_processed=True, organism__in=qn_organisms).values_list('accession_code', flat=True))
                new_data['data'][key] = sample_codes

        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")

        if new_data.get('start'):

            # Make sure we have a valid activated token.
            token_id = self.request.data.get('token_id', None)

            if not token_id:
                token_id = self.request.META.get('HTTP_API_KEY', None)

            try:
                token = APIToken.objects.get(id=token_id, is_activated=True)
            except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")

            supplied_email_address = self.request.data.get('email_address', None)
            email_ccdl_ok = self.request.data.get('email_ccdl_ok', False)

            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                # Persist email preferences before dispatching so the worker
                # sees them.
                obj = serializer.save()
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get('no_send_job', False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException("Unable to queue download job. Something has gone"
                                       " wrong and we have been notified about it.")

                serializer.validated_data['is_processing'] = True
                obj = serializer.save()

                # Best-effort Slack notification about new external users.
                if settings.RUNNING_IN_CLOUD and settings.ENGAGEMENTBOT_WEBHOOK is not None \
                        and DatasetView._should_display_on_engagement_bot(supplied_email_address):
                    try:
                        try:
                            remote_ip = get_client_ip(self.request)
                            city = requests.get('https://ipapi.co/' + remote_ip + '/json/', timeout=10).json()['city']
                        except Exception:
                            # NOTE(review): if get_client_ip itself raised,
                            # `remote_ip` would be unbound below; the outer
                            # except would then swallow the NameError.
                            city = "COULD_NOT_DETERMINE"

                        new_user_text = "New user " + supplied_email_address + " from " + city + " [" + remote_ip + "] downloaded a dataset! (" + str(old_object.id) + ")"
                        webhook_url = settings.ENGAGEMENTBOT_WEBHOOK
                        slack_json = {
                            "channel": "ccdl-general",  # Move to robots when we get sick of these
                            "username": "EngagementBot",
                            "icon_emoji": ":halal:",
                            "attachments": [
                                {"color": "good",
                                 "text": new_user_text
                                 }
                            ]
                        }
                        response = requests.post(
                            webhook_url,
                            json=slack_json,
                            headers={'Content-Type': 'application/json'},
                            timeout=10
                        )
                    except Exception as e:
                        # It doesn't really matter if this didn't work
                        logger.error(e)
                        pass

                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data['data'] = old_data
            serializer.validated_data['aggregate_by'] = old_aggregate
        serializer.save()
# NOTE: the docstring below is rendered verbatim in the public API docs
# (presumably via drf-yasg); keep it user-facing.
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create

    This endpoint can be used to create and activate tokens. These tokens can be used
    in requests that provide urls to download computed files. They are a way to accept
    our terms of service.

    ```py
    import requests
    import json

    response = requests.post('https://api.refine.bio/v1/token/')
    token_id = response.json()['id']
    response = requests.put('https://api.refine.bio/v1/token/' + token_id + '/', json.dumps({'is_activated': True}), headers={'Content-Type': 'application/json'})
    ```

    The token id needs to be provided in the HTTP request in the API-KEY header.

    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """
    model = APIToken
    serializer_class = APITokenSerializer
# PATCH is hidden from the generated docs (auto_schema=None); use PUT for updates.
@method_decorator(name='patch', decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify Api Tokens.

    get:
    Return details about a specific token.

    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """
    model = APIToken
    lookup_field = 'id'
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##


class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """
    model = Experiment
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    # Simple equality filters only; full-text search lives in ExperimentDocumentView.
    filterset_fields = (
        'title',
        'description',
        'accession_code',
        'alternate_accession_code',
        'source_database',
        'source_url',
        'has_publication',
        'publication_title',
        'publication_doi',
        'pubmed_id',
        'organisms',
        'submitter_institution',
        'created_at',
        'last_modified',
        'source_first_published',
        'source_last_modified',
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given its accession code """
    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='dataset_id', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns samples that are added to a dataset.",
    ),
    openapi.Parameter(
        name='experiment_accession_code', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Filters the result and only returns only the samples associated with an experiment accession code.",
    ),
    openapi.Parameter(
        name='accession_codes', in_=openapi.IN_QUERY,
        type=openapi.TYPE_STRING,
        description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
    ),
]))
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """
    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = '__all__'
    # NOTE(review): no trailing comma, so this is the plain string '-is_processed',
    # not a 1-tuple. DRF's OrderingFilter accepts either form, but a tuple would
    # match the style used elsewhere in this file — confirm intended.
    ordering = ('-is_processed')
    # Exact-match query-parameter filters.
    filterset_fields = (
        'title',
        'organism',
        'source_database',
        'source_archive_url',
        'has_raw',
        'platform_name',
        'technology',
        'manufacturer',
        'sex',
        'age',
        'specimen_part',
        'genotype',
        'disease',
        'disease_stage',
        'cell_line',
        'treatment',
        'race',
        'subject',
        'compound',
        'time',
        'is_processed',
        'is_public'
    )
    def get_queryset(self):
        """
        Build the base queryset, applying the custom query-parameter filters and
        the free-text `filter_by` search across many metadata fields.
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        queryset = Sample.public_objects \
            .prefetch_related('organism') \
            .prefetch_related('results') \
            .prefetch_related('results__processor') \
            .prefetch_related('results__computationalresultannotation_set') \
            .prefetch_related('results__computedfile_set') \
            .filter(**self.get_query_params_filters())
        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get('filter_by', None)
        if filter_by:
            queryset = queryset.filter(Q(accession_code__icontains=filter_by) |
                                       Q(title__icontains=filter_by) |
                                       Q(sex__icontains=filter_by) |
                                       Q(age__icontains=filter_by) |
                                       Q(specimen_part__icontains=filter_by) |
                                       Q(genotype__icontains=filter_by) |
                                       Q(disease__icontains=filter_by) |
                                       Q(disease_stage__icontains=filter_by) |
                                       Q(cell_line__icontains=filter_by) |
                                       Q(treatment__icontains=filter_by) |
                                       Q(race__icontains=filter_by) |
                                       Q(subject__icontains=filter_by) |
                                       Q(compound__icontains=filter_by) |
                                       Q(time__icontains=filter_by))
        return queryset
    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()
        ids = self.request.query_params.get('ids', None)
        if ids is not None:
            ids = [int(x) for x in ids.split(',')]
            filter_dict['pk__in'] = ids
        experiment_accession_code = self.request.query_params.get('experiment_accession_code', None)
        if experiment_accession_code:
            # 404 when the experiment doesn't exist rather than returning an empty list.
            experiment = get_object_or_404(Experiment.objects.values('id'), accession_code=experiment_accession_code)
            filter_dict['experiments__in'] = [experiment['id']]
        accession_codes = self.request.query_params.get('accession_codes', None)
        if accession_codes:
            accession_codes = accession_codes.split(',')
            filter_dict['accession_code__in'] = accession_codes
        # NOTE: `dataset_id` overwrites any `accession_codes` filter set above.
        dataset_id = self.request.query_params.get('dataset_id', None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            filter_dict['accession_code__in'] = [item for sublist in dataset.data.values() for item in sublist]
        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get('organism__name', None)
        if organism_name:
            filter_dict['organism__name'] = organism_name
        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given its accession code """
    lookup_field = "accession_code"  # URL kwarg used for lookup instead of the pk
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##
class ProcessorList(generics.ListAPIView):
    """List all processors."""
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##
class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list
    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).
    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """
    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        # An activated API token in the HTTP_API_KEY header unlocks the
        # serializer that exposes S3 download URLs.
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            APIToken.objects.get(id=token_id, is_activated=True)
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer
        return ComputationalResultWithUrlSerializer

    def filter_queryset(self, queryset):
        # Treat every query parameter except the pagination ones as an ORM filter.
        params = self.request.query_params.dict()
        orm_filters = {name: value for name, value in params.items()
                       if name not in ('limit', 'offset')}
        return queryset.filter(**orm_filters)
##
# Search Filter Models
##
class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """
    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    paginator = None  # return the full list in a single response
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """
    serializer_class = PlatformSerializer
    paginator = None  # return the full list in a single response
    def get_queryset(self):
        # Distinct (accession_code, name) pairs across all public samples.
        return Sample.public_objects.all().values("platform_accession_code", "platform_name").distinct()
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """
    serializer_class = InstitutionSerializer
    paginator = None  # return the full list in a single response
    def get_queryset(self):
        # Distinct submitter institutions across all public experiments.
        return Experiment.public_objects.all().values("submitter_institution").distinct()
##
# Jobs
##
class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """
    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable via query parameters.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)  # newest jobs first by default
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='sample_accession_code', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='List the downloader jobs associated with a sample',
    ),
    openapi.Parameter(
        name='nomad', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='Only return jobs that are in the nomad queue currently',
    ),
]))
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """
    model = DownloaderJob
    serializer_class = DownloaderJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)  # newest jobs first by default
    def get_queryset(self):
        """Apply the optional `sample_accession_code` and `nomad` query filters."""
        queryset = DownloaderJob.objects.all()
        sample_accession_code = self.request.query_params.get('sample_accession_code', None)
        if sample_accession_code:
            queryset = queryset.filter(original_files__samples__accession_code=sample_accession_code).distinct()
        nomad = self.request.query_params.get('nomad', None)
        if nomad:
            # Restrict to jobs whose nomad_job_id matches a job currently in the
            # Nomad queue. Jobs whose nomad_job_id was never set can never match.
            nomad_jobs_ids = [job['ID'] for job in get_nomad_jobs()]
            queryset = queryset.filter(nomad_job_id__in=nomad_jobs_ids)
        return queryset
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='sample_accession_code', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='List the processor jobs associated with a sample',
    ),
    openapi.Parameter(
        name='nomad', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description='Only return jobs that are in the nomad queue currently',
    ),
]))
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """
    model = ProcessorJob
    serializer_class = ProcessorJobSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ('id', 'created_at')
    ordering = ('-id',)  # newest jobs first by default
    def get_queryset(self):
        """Apply the optional `sample_accession_code` and `nomad` query filters."""
        queryset = ProcessorJob.objects.all()
        sample_accession_code = self.request.query_params.get('sample_accession_code', None)
        if sample_accession_code:
            queryset = queryset.filter(original_files__samples__accession_code=sample_accession_code).distinct()
        nomad = self.request.query_params.get('nomad', None)
        if nomad:
            # Restrict to jobs whose nomad_job_id matches a job currently in the
            # Nomad queue. Jobs launched without a nomad_job_id (NULL in the DB)
            # can never match this filter.
            nomad_jobs_ids = [job['ID'] for job in get_nomad_jobs()]
            queryset = queryset.filter(nomad_job_id__in=nomad_jobs_ids)
        return queryset
###
# Statistics
###
def get_start_date(range_param):
    """Return the UTC datetime marking the start of the given range.

    `range_param` must be one of 'day', 'week', 'month' or 'year'; any other
    value yields None.
    """
    deltas = {
        'day': timedelta(days=1),
        'week': timedelta(weeks=1),
        'month': timedelta(days=30),
        'year': timedelta(days=365),
    }
    delta = deltas.get(range_param)
    if delta is None:
        return None
    return datetime.now(tz=timezone.utc) - delta
def paginate_queryset_response(queryset, request):
    """Apply limit/offset pagination to `queryset` and wrap the page in a 200 Response.

    Each item on the page must implement `to_dict()`.
    """
    paginator = LimitOffsetPagination()
    page = paginator.paginate_queryset(queryset, request)
    payload = {
        'results': [item.to_dict() for item in page],
        'limit': paginator.limit,
        'offset': paginator.offset,
        'count': paginator.count,
    }
    return Response(data=payload, status=status.HTTP_200_OK)
class FailedDownloaderJobStats(APIView):
    """Failure reasons for DownloaderJobs created within a time range, grouped and counted."""
    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))  # cache responses for 10 minutes
    def get(self, request, version, format=None):
        range_param = request.query_params.dict().pop('range', 'day')
        start_date = get_start_date(range_param)
        # Group jobs created since start_date by the first 80 characters of
        # failure_reason, counting jobs and the distinct unprocessed samples affected.
        jobs = DownloaderJob.objects\
            .filter(created_at__gt=start_date)\
            .annotate(reason=Left('failure_reason', 80))\
            .values('reason')\
            .annotate(
                job_count=Count('reason'),
                sample_count=Count('original_files__samples', distinct=True, filter=Q(original_files__samples__is_processed=False))
            )\
            .order_by('-job_count')
        return paginate_queryset_response(jobs, request)
class FailedProcessorJobStats(APIView):
    """Failure reasons for ProcessorJobs created within a time range, grouped and counted."""
    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))  # cache responses for 10 minutes
    def get(self, request, version, format=None):
        range_param = request.query_params.dict().pop('range', 'day')
        start_date = get_start_date(range_param)
        # Same aggregation as FailedDownloaderJobStats, but over ProcessorJobs.
        jobs = ProcessorJob.objects\
            .filter(created_at__gt=start_date)\
            .annotate(reason=Left('failure_reason', 80))\
            .values('reason')\
            .annotate(
                job_count=Count('reason'),
                sample_count=Count('original_files__samples', distinct=True, filter=Q(original_files__samples__is_processed=False))
            )\
            .order_by('-job_count')
        return paginate_queryset_response(jobs, request)
class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """
    @method_decorator(cache_page(10 * 60))  # cache responses for 10 minutes
    def get(self, request, version, format=None):
        # static values for now
        dummy = request.query_params.dict().pop('dummy', None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                'samples_available': 904953 + 391022,
                'total_size_in_bytes': 832195361132962,
                'supported_organisms': 43 + 159,
                'experiments_processed': 35785 + 8661
            }
            return Response(result)
        result = {
            'samples_available': self._get_samples_available(),
            'total_size_in_bytes': OriginalFile.objects.aggregate(total_size=Sum('size_in_bytes'))['total_size'],
            'supported_organisms': self._get_supported_organisms(),
            'experiments_processed': self._get_experiments_processed()
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        # `__gt=0` so a single processed sample counts; the previous `__gt=1`
        # required at least two processed samples, contradicting this docstring.
        experiments_with_sample_processed = Experiment.objects\
            .annotate(
                processed_samples_count=Count('samples', filter=Q(samples__is_processed=True)),
            )\
            .filter(Q(processed_samples_count__gt=0))\
            .count()
        # Experiments whose only output is quant.sf files on otherwise-unprocessed samples.
        experiments_with_sample_quant = ComputedFile.objects\
            .filter(filename='quant.sf', result__samples__is_processed=False)\
            .values_list('result__samples__experiments', flat=True)\
            .distinct()\
            .count()
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        organisms_without_qn_targets = Organism.objects.filter(
            qn_target__isnull=True,
            sample__is_processed=False,
            sample__technology='RNA-SEQ',
            sample__results__computedfile__filename='quant.sf'
        ).distinct().count()
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = Sample.objects.filter(
            is_processed=False,
            technology='RNA-SEQ',
            results__computedfile__filename='quant.sf'
        ).distinct().count()
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """
    @swagger_auto_schema(manual_parameters=[openapi.Parameter(
        name='range', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="Specify a range from which to calculate the possible options",
        enum=('day', 'week', 'month', 'year',)
    )])
    @method_decorator(cache_page(10 * 60))  # cache responses for 10 minutes
    def get(self, request, version, format=None):
        range_param = request.query_params.dict().pop('range', None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)

    @classmethod
    def calculate_stats(cls, range_param):
        """Assemble the full stats payload; a `range_param` additionally adds timelines."""
        data = {}
        data['generated_on'] = timezone.now()
        data['survey_jobs'] = cls._get_job_stats(SurveyJob.objects, range_param)
        data['downloader_jobs'] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data['processor_jobs'] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data['experiments'] = cls._get_object_stats(Experiment.objects, range_param)
        # processed and unprocessed samples stats
        data['unprocessed_samples'] = cls._get_object_stats(Sample.objects.filter(is_processed=False), range_param, 'last_modified')
        data['processed_samples'] = cls._get_object_stats(Sample.processed_objects, range_param, 'last_modified')
        data['processed_samples']['last_hour'] = cls._samples_processed_last_hour()
        data['processed_samples']['technology'] = {}
        techs = Sample.processed_objects.values('technology').annotate(count=Count('technology'))
        for tech in techs:
            # skip null/blank technology values
            if not tech['technology'] or not tech['technology'].strip():
                continue
            data['processed_samples']['technology'][tech['technology']] = tech['count']
        data['processed_samples']['organism'] = {}
        organisms = Sample.processed_objects.values('organism__name').annotate(count=Count('organism__name'))
        for organism in organisms:
            if not organism['organism__name']:
                continue
            data['processed_samples']['organism'][organism['organism__name']] = organism['count']
        # NOTE(review): no range_param is passed here, so processed_experiments
        # never gets a timeline — confirm that is intentional.
        data['processed_experiments'] = cls._get_object_stats(Experiment.processed_public_objects)
        data['active_volumes'] = list(get_active_volumes())
        data['dataset'] = cls._get_dataset_stats(range_param)
        if range_param:
            data['input_data_size'] = cls._get_input_data_size()
            data['output_data_size'] = cls._get_output_data_size()
        data.update(get_nomad_jobs_breakdown())
        return data

    # Dataset owners whose downloads shouldn't count toward the public stats.
    EMAIL_USERNAME_BLACKLIST = [
        'arielsvn',
        'cansav09',
        'd.prasad',
        'daniel.himmelstein',
        'dv.prasad991',
        'greenescientist',
        'jaclyn.n.taroni',
        'kurt.wheeler91',
        'michael.zietz',
        'miserlou'
    ]

    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        filter_query = Q()
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith='@alexslemonade.org')
        processed_datasets = Dataset.objects.filter(is_processed=True, email_address__isnull=False).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count('id'),
            aggregated_by_experiment=Count('id', filter=Q(aggregate_by='EXPERIMENT')),
            aggregated_by_species=Count('id', filter=Q(aggregate_by='SPECIES')),
            scale_by_none=Count('id', filter=Q(scale_by='NONE')),
            scale_by_minmax=Count('id', filter=Q(scale_by='MINMAX')),
            scale_by_standard=Count('id', filter=Q(scale_by='STANDARD')),
            scale_by_robust=Count('id', filter=Q(scale_by='ROBUST')),
        )
        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result['timeline'] = cls._get_intervals(
                processed_datasets,
                range_param,
                'last_modified'
            ).annotate(
                total=Count('id'),
                total_size=Sum('size_in_bytes')
            )
        return result

    @classmethod
    def _samples_processed_last_hour(cls):
        """Count samples whose last_modified timestamp falls within the past hour."""
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()

    @classmethod
    def _get_input_data_size(cls):
        """Total bytes of original files attached to processed samples."""
        total_size = OriginalFile.objects.filter(
            sample__is_processed=True # <-- SLOW
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_output_data_size(cls):
        """Total bytes of public computed files that have been uploaded to S3."""
        # Uploaded files have both a bucket and a key set, so filter with
        # s3_key__isnull=False. It was previously `True`, which excluded every
        # uploaded file and made this always report 0.
        total_size = ComputedFile.public_objects.all().filter(
            s3_bucket__isnull=False,
            s3_key__isnull=False
        ).aggregate(
            Sum('size_in_bytes')
        )
        return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0

    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate total/successful/failed/pending/open counts and the average runtime."""
        start_filter = Q()
        if range_param:
            start_date = get_start_date(range_param)
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)
        result = jobs.filter(start_filter).aggregate(
            total=Count('id'),
            successful=Count('id', filter=Q(success=True)),
            failed=Count('id', filter=Q(success=False)),
            pending=Count('id', filter=Q(start_time__isnull=True,
                                         success__isnull=True,
                                         created_at__gt=JOB_CREATED_AT_CUTOFF)),
            open=Count('id', filter=Q(start_time__isnull=False,
                                      success__isnull=True,
                                      created_at__gt=JOB_CREATED_AT_CUTOFF)),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result['average_time'] = jobs.filter(start_filter).filter(
            start_time__isnull=False,
            end_time__isnull=False,
            success=True
        ).aggregate(
            average_time=Avg(F('end_time') - F('start_time'))
        )['average_time']
        if not result['average_time']:
            result['average_time'] = 0
        else:
            result['average_time'] = result['average_time'].total_seconds()
        if range_param:
            result['timeline'] = cls._get_intervals(jobs, range_param) \
                .annotate(
                    total=Count('id'),
                    successful=Count('id', filter=Q(success=True)),
                    failed=Count('id', filter=Q(success=False)),
                    pending=Count('id', filter=Q(start_time__isnull=True, success__isnull=True)),
                    open=Count('id', filter=Q(start_time__isnull=False, success__isnull=True)),
                )
        return result

    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field='created_at'):
        """Total count for a queryset, plus a per-interval timeline when requested."""
        result = {
            'total': objects.count()
        }
        if range_param:
            result['timeline'] = cls._get_intervals(objects, range_param, field)\
                .annotate(total=Count('id'))
        return result

    @classmethod
    def _get_intervals(cls, objects, range_param, field='last_modified'):
        """Bucket `objects` by hour/day/month depending on `range_param`."""
        range_to_trunc = {
            'day': 'hour',
            'week': 'day',
            'month': 'day',
            'year': 'month'
        }
        # `get_start_date` already returns a datetime, so use it directly.
        # (The previous code called `.get(range_param)` on that datetime,
        # which raised AttributeError whenever a timeline was requested.)
        start_date = get_start_date(range_param)
        # truncate the given field by hour, day or month depending on the `range` param
        # and annotate each object with that. This will allow us to count the number of objects
        # on each interval with a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())) \
                      .values('start') \
                      .filter(start__gte=start_date)
###
# Transcriptome Indices
###
class TranscriptomeIndexList(generics.ListAPIView):
    """ List all Transcriptome Indices. These are a special type of process result, necessary for processing other SRA samples. """
    serializer_class = OrganismIndexSerializer
    def get_queryset(self):
        # One row per (organism, index_type) pair.
        return OrganismIndex.objects.distinct("organism", "index_type")
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Organism name. Eg. `MUS_MUSCULUS`",
    ),
    openapi.Parameter(
        name='length', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING,
        description="",
        enum=('short', 'long',),
        default='short'
    ),
]))
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """
    serializer_class = OrganismIndexSerializer
    def get_object(self):
        organism_name = self.kwargs['organism_name'].upper()
        length = self.request.query_params.get('length', 'short')
        # Get the correct organism index object, serialize it, and return it
        transcription_length = "TRANSCRIPTOME_" + length.upper()
        try:
            # `organism_name` is already uppercased above; no need to do it again.
            organism = Organism.objects.get(name=organism_name)
            organism_index = OrganismIndex.objects.exclude(s3_url__exact="")\
                .distinct("organism", "index_type")\
                .get(organism=organism, index_type=transcription_length)
            return organism_index
        # Also catch Organism.DoesNotExist: previously an unknown organism name
        # escaped this handler and surfaced as a 500 instead of a 404.
        except (Organism.DoesNotExist, OrganismIndex.DoesNotExist):
            raise Http404('Organism does not exists')
###
# Compendia
###
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='latest_version', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="`True` will only return the highest `compendium_version` for each primary_organism.",
    ),
    openapi.Parameter(
        name='quant_sf_only', in_=openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN,
        description="`True` for RNA-seq Sample Compendium results or `False` for quantile normalized.",
    ),
]))
class CompendiumResultList(generics.ListAPIView):
    """
    List all CompendiaResults with filtering.
    """
    model = CompendiumResult
    queryset = CompendiumResult.objects.all()
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    filterset_fields = ['primary_organism__name', 'compendium_version', 'quant_sf_only']
    ordering_fields = ('primary_organism__name', 'compendium_version', 'id')
    ordering = ('-primary_organism__name',)
    def get_queryset(self):
        """Public compendium results, optionally reduced to the latest version per organism."""
        public_result_queryset = CompendiumResult.objects.filter(result__is_public=True)
        # NOTE(review): query params are strings, so `?latest_version=false`
        # is truthy here and behaves like `true` — confirm intended.
        latest_version = self.request.query_params.get('latest_version', False)
        if latest_version:
            # Correlated subquery: for each row, the highest compendium_version
            # among results for the same (primary_organism, quant_sf_only) pair.
            version_filter = Q(primary_organism=OuterRef('primary_organism'),
                               quant_sf_only=OuterRef('quant_sf_only'))
            latest_version = CompendiumResult.objects.filter(version_filter)\
                .order_by('-compendium_version')\
                .values('compendium_version')
            return public_result_queryset.annotate(
                latest_version=Subquery(latest_version[:1])
            ).filter(compendium_version=F('latest_version'))
        return public_result_queryset
    def get_serializer_class(self):
        """An activated API token unlocks the serializer that exposes download URLs."""
        try:
            token_id = self.request.META.get('HTTP_API_KEY', None)
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return CompendiumResultWithUrlSerializer
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return CompendiumResultSerializer
class CompendiumResultDetails(generics.RetrieveAPIView):
    """
    Get a specific Compendium Result
    """
    model = CompendiumResult
    queryset = CompendiumResult.objects.filter(is_public=True)
    lookup_field = 'id'
    def get_serializer_class(self):
        """An activated API token unlocks the serializer that exposes download URLs."""
        try:
            token_id = self.request.META.get('HTTP_API_KEY', None)
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return CompendiumResultWithUrlSerializer
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return CompendiumResultSerializer
###
# QN Targets
###
class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """
    serializer_class = OrganismSerializer
    paginator = None  # return the full list in a single response
    def get_queryset(self):
        return Organism.get_objects_with_qn_targets()
@method_decorator(name='get', decorator=swagger_auto_schema(manual_parameters=[
    openapi.Parameter(
        name='organism_name', in_=openapi.IN_PATH, type=openapi.TYPE_STRING,
        description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
    )], responses={404: 'QN Target not found for the given organism.'}))
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """
    serializer_class = QNTargetSerializer
    def get_object(self):
        """Return the newest QN-target ComputedFile for the organism, or 404."""
        organism = self.kwargs['organism_name']
        # Normalize to the canonical organism name format, e.g. "mus musculus" -> "MUS_MUSCULUS".
        organism = organism.upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(organism).id
            annotation = ComputationalResultAnnotation.objects.filter(
                data__organism_id=organism_id,
                data__is_qn=True
            ).order_by(
                '-created_at'
            ).first()
            # The broad except below also covers `annotation` being None
            # (the `.result` access raises AttributeError), turning it into a 404.
            qn_target = annotation.result.computedfile_set.first()
        except Exception:
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##
class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list
    ComputedFiles are representation of files created by data-refinery processes.
    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """
    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Exact-match query-parameter filters.
    filterset_fields = (
        'id',
        'samples',
        'is_qn_target',
        'is_smashable',
        'is_qc',
        'is_compendia',
        'quant_sf_only',
        'svd_algorithm',
        'compendia_version',
        'created_at',
        'last_modified',
    )
    ordering_fields = ('id', 'created_at', 'last_modified', 'compendia_version',)
    ordering = ('-id',)  # newest files first by default
    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        A valid activated API token is passed through so the serializer can
        include download URLs; otherwise the default context is returned.
        """
        serializer_context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get('HTTP_API_KEY', None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, 'token': token}
        except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context
class OriginalFileList(generics.ListAPIView):
    """
    original_files_list
    List Original Files that are associated with Samples. These are the files we process.
    """
    queryset = OriginalFile.objects.all()
    serializer_class = OriginalFileListSerializer
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter,)
    # Every serialized field is filterable via query parameters.
    filterset_fields = OriginalFileListSerializer.Meta.fields
    ordering_fields = ('id', 'created_at', 'last_modified',)
    ordering = ('-id',)  # newest files first by default
##
# Util
##
def get_client_ip(request):
    """Best-effort client IP: first hop of X-Forwarded-For, else REMOTE_ADDR.

    Returns an empty string when neither header is present.
    """
    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded_for:
        # The first comma-separated entry is the originating client.
        return forwarded_for.split(',')[0]
    return request.META.get('REMOTE_ADDR', '')
<|code_end|>
|
Compendium Missing Generated File
### Context
`RATTUS_NORVEGICUS/rattus_norvegicus.tsv` is missing.
<img width="832" alt="Screen Shot 2019-11-27 at 10 29 50 PM" src="https://user-images.githubusercontent.com/1075609/69775087-ae306e00-1165-11ea-9a64-1035c56ea079.png">
### Problem or idea
This file should be included. I looked into it: I removed this step when removing the S3 copy, in order to drop the extra computed files.
This also means that none of the compendia generated in the past 9 days are available.
### Solution or next step
Deploy fix for compendia output file.
I will add a list of compendia here that need to be regenerated.
Regenerate those compendia.
| workers/data_refinery_workers/processors/create_compendia.py
<|code_start|>import logging
import os
import shutil
import time
import itertools
from typing import Dict
from django.utils import timezone
from django.db.models import Q, Count
from fancyimpute import IterativeSVD
import numpy as np
import pandas as pd
import psutil
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
CompendiumResultOrganismAssociation,
ComputedFile,
Organism,
Pipeline)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
# Silence pandas' SettingWithCopyWarning; the smashing code performs chained assignments.
pd.set_option('mode.chained_assignment', None)
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")  # destination bucket for computed files
BYTES_IN_GB = 1024 * 1024 * 1024
SMASHING_DIR = "/home/user/data_store/smashed/"  # scratch space reused across compendia jobs
logger = get_and_configure_logger(__name__)
### DEBUG ###
# NOTE(review): DEBUG logging is forced on unconditionally here — confirm this
# is still wanted outside of local debugging.
logger.setLevel(logging.getLevelName('DEBUG'))
def log_state(message, job_id, start_time=False):
    """Emit a DEBUG log line with CPU/RAM usage for this job.

    When `start_time` is falsy, returns a fresh timestamp so callers can time
    a section; when a start time is given, logs the elapsed duration instead.
    Does nothing (and returns None) unless DEBUG logging is enabled.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return None
    current_process = psutil.Process(os.getpid())
    ram_in_GB = current_process.memory_info().rss / BYTES_IN_GB
    logger.debug(message,
                 total_cpu=psutil.cpu_percent(),
                 process_ram=ram_in_GB,
                 job_id=job_id)
    if not start_time:
        return time.time()
    logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
    return None
def _prepare_input(job_context: Dict) -> Dict:
    """Collapse the per-organism sample map onto the primary organism and set up
    the working directory for the compendium job.

    Mutates `job_context` in place: afterwards `samples` maps the primary
    organism (the one contributing the most samples) to every sample from all
    organisms, and `work_dir` points at a shared per-organism directory.
    """
    start_time = log_state("prepare input", job_context["job"].id)
    # The "primary" organism is the one with the largest sample list.
    job_context["primary_organism"] = max(job_context["samples"],
                                          key=lambda organism:len(job_context["samples"][organism]))
    job_context["all_organisms"] = job_context["samples"].keys()
    # Flatten every organism's samples into one list keyed by the primary organism.
    all_samples = list(itertools.chain(*job_context["samples"].values()))
    job_context["samples"] = {job_context["primary_organism"]: all_samples}
    job_context = smashing_utils.prepare_files(job_context)
    # Compendia jobs only run for one organism, so we know the only
    # key will be the organism name, unless of course we've already failed.
    if job_context['job'].success is not False:
        job_context["organism_name"] = job_context['group_by_keys'][0]
        # TEMPORARY for iterating on compendia more quickly. Rather
        # than downloading the data from S3 each run we're just gonna
        # use the same directory every job.
        job_context["old_work_dir"] = job_context["work_dir"]
        job_context["work_dir"] = SMASHING_DIR + job_context["organism_name"] + "/"
        if not os.path.exists(job_context["work_dir"]):
            os.makedirs(job_context["work_dir"])
    log_state("prepare input done", job_context["job"].id, start_time)
    return job_context
def _prepare_frames(job_context: Dict) -> Dict:
    """Smash every sample set in `job_context['input_files']` into frames.

    Populates bookkeeping keys (`unsmashable_files`, `num_samples`), marks the
    dataset successful, and returns the updated context. Raises
    ProcessorJobError if any key's frames cannot be prepared.
    """
    start_prepare_frames = log_state("start _prepare_frames", job_context["job"].id)
    job_context['unsmashable_files'] = []
    job_context['num_samples'] = 0
    # Smash all of the sample sets
    logger.debug("About to smash!",
                 dataset_count=len(job_context['dataset'].data),
                 job_id=job_context['job'].id)
    try:
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop('input_files').items():
            job_context = smashing_utils.process_frames_for_key(key, input_files, job_context)
        # if len(job_context['all_frames']) < 1:
        # TODO: Enable this check?
    except Exception as e:
        # Chain the original exception so the real cause shows up in tracebacks
        # instead of being silently discarded behind the generic failure message.
        raise utils.ProcessorJobError("Could not prepare frames for compendia.",
                                      success=False,
                                      dataset_id=job_context['dataset'].id,
                                      processor_job_id=job_context['job_id'],
                                      num_input_files=job_context['num_input_files']) from e
    job_context['dataset'].success = True
    job_context['dataset'].save()
    log_state("end _prepare_frames", job_context["job"].id, start_prepare_frames)
    return job_context
def _perform_imputation(job_context: Dict) -> Dict:
    """
    Take the inputs and perform the primary imputation.
    Via https://github.com/AlexsLemonade/refinebio/issues/508#issuecomment-435879283:
    - Combine all microarray samples with a full join to form a
      microarray_expression_matrix (this may end up being a DataFrame).
    - Combine all RNA-seq samples (lengthScaledTPM) with a full outer join
      to form a rnaseq_expression_matrix.
    - Calculate the sum of the lengthScaledTPM values for each row (gene) of
      the rnaseq_expression_matrix (rnaseq_row_sums).
    - Calculate the 10th percentile of rnaseq_row_sums
    - Drop all rows in rnaseq_expression_matrix with a row sum < 10th percentile of
      rnaseq_row_sums; this is now filtered_rnaseq_matrix
    - log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
    - Set all zero values in log2_rnaseq_matrix to NA, but make sure to keep track of
      where these zeroes are
    - Perform a full outer join of microarray_expression_matrix and
      log2_rnaseq_matrix; combined_matrix
    - Remove genes (rows) with >30% missing values in combined_matrix
    - Remove samples (columns) with >50% missing values in combined_matrix
    - "Reset" zero values that were set to NA in RNA-seq samples (i.e., make these zero
      again) in combined_matrix
    - Transpose combined_matrix; transposed_matrix
    - Perform imputation of missing values with IterativeSVD (rank=10) on
      the transposed_matrix; imputed_matrix
      -- with specified svd algorithm or skip
    - Untranspose imputed_matrix (genes are now rows, samples are now columns)
    - Quantile normalize imputed_matrix where genes are rows and samples are columns
    """
    imputation_start = log_state("start perform imputation", job_context["job"].id)
    job_context['time_start'] = timezone.now()
    rnaseq_row_sums_start = log_state("start rnaseq row sums", job_context["job"].id)
    # We potentially can have a microarray-only compendia but not a RNASeq-only compendia
    log2_rnaseq_matrix = None
    if job_context['rnaseq_matrix'] is not None:
        # Drop any genes that are entirely NULL in the RNA-Seq matrix
        job_context['rnaseq_matrix'] = job_context['rnaseq_matrix'].dropna(axis='columns',
                                                                           how='all')
        # Calculate the sum of the lengthScaledTPM values for each row
        # (gene) of the rnaseq_matrix (rnaseq_row_sums)
        rnaseq_row_sums = np.sum(job_context['rnaseq_matrix'], axis=1)
        log_state("end rnaseq row sums", job_context["job"].id, rnaseq_row_sums_start)
        rnaseq_decile_start = log_state("start rnaseq decile", job_context["job"].id)
        # Calculate the 10th percentile of rnaseq_row_sums
        rnaseq_tenth_percentile = np.percentile(rnaseq_row_sums, 10)
        log_state("end rnaseq decile", job_context["job"].id, rnaseq_decile_start)
        drop_start = log_state("drop all rows", job_context["job"].id)
        # Drop all rows in rnaseq_matrix with a row sum < 10th
        # percentile of rnaseq_row_sums; this is now
        # filtered_rnaseq_matrix
        # TODO: there is probably a better way to do this with `np.where`
        rows_to_filter = []
        for (x, sum_val) in rnaseq_row_sums.items():
            if sum_val < rnaseq_tenth_percentile:
                rows_to_filter.append(x)
        del rnaseq_row_sums
        log_state("actually calling drop()", job_context["job"].id)
        filtered_rnaseq_matrix = job_context.pop('rnaseq_matrix').drop(rows_to_filter)
        del rows_to_filter
        log_state("end drop all rows", job_context["job"].id, drop_start)
        log2_start = log_state("start log2", job_context["job"].id)
        # log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
        filtered_rnaseq_matrix_plus_one = filtered_rnaseq_matrix + 1
        log2_rnaseq_matrix = np.log2(filtered_rnaseq_matrix_plus_one)
        del filtered_rnaseq_matrix_plus_one
        del filtered_rnaseq_matrix
        log_state("end log2", job_context["job"].id, log2_start)
        cache_start = log_state("start caching zeroes", job_context["job"].id)
        # Cache our RNA-Seq zero values
        cached_zeroes = {}
        for column in log2_rnaseq_matrix.columns:
            cached_zeroes[column] = log2_rnaseq_matrix.index[np.where(log2_rnaseq_matrix[column] == 0)]
        # Set all zero values in log2_rnaseq_matrix to NA, but make sure
        # to keep track of where these zeroes are
        log2_rnaseq_matrix[log2_rnaseq_matrix == 0] = np.nan
        log_state("end caching zeroes", job_context["job"].id, cache_start)
    outer_merge_start = log_state("start outer merge", job_context["job"].id)
    # Perform a full outer join of microarray_matrix and
    # log2_rnaseq_matrix; combined_matrix
    if log2_rnaseq_matrix is not None:
        combined_matrix = job_context.pop('microarray_matrix').merge(log2_rnaseq_matrix,
                                                                     how='outer',
                                                                     left_index=True,
                                                                     right_index=True)
    else:
        logger.info("Building compendia with only microarray data.", job_id=job_context["job"].id)
        combined_matrix = job_context.pop('microarray_matrix')
    log_state("ran outer merge, now deleteing log2_rnaseq_matrix", job_context["job"].id)
    del log2_rnaseq_matrix
    log_state("end outer merge", job_context["job"].id, outer_merge_start)
    drop_na_genes_start = log_state("start drop NA genes", job_context["job"].id)
    # # Visualize Prefiltered
    # output_path = job_context['output_dir'] + "pre_filtered_" + str(time.time()) + ".png"
    # visualized_prefilter = visualize.visualize(combined_matrix.copy(), output_path)
    # Remove genes (rows) with <=70% present values in combined_matrix
    thresh = combined_matrix.shape[1] * .7 # (Rows, Columns)
    # Everything below `thresh` is dropped
    row_filtered_matrix = combined_matrix.dropna(axis='index',
                                                 thresh=thresh)
    del combined_matrix
    del thresh
    log_state("end drop NA genes", job_context["job"].id, drop_na_genes_start)
    drop_na_samples_start = log_state("start drop NA samples", job_context["job"].id)
    # # Visualize Row Filtered
    # output_path = job_context['output_dir'] + "row_filtered_" + str(time.time()) + ".png"
    # visualized_rowfilter = visualize.visualize(row_filtered_matrix.copy(), output_path)
    # Remove samples (columns) with <50% present values in combined_matrix
    # XXX: Find better test data for this!
    col_thresh = row_filtered_matrix.shape[0] * .5
    row_col_filtered_matrix_samples = row_filtered_matrix.dropna(axis='columns',
                                                                 thresh=col_thresh)
    row_col_filtered_matrix_samples_index = row_col_filtered_matrix_samples.index
    row_col_filtered_matrix_samples_columns = row_col_filtered_matrix_samples.columns
    # NOTE(review): label says "genes" but this timer covers the sample drop.
    log_state("end drop NA genes", job_context["job"].id, drop_na_samples_start)
    replace_zeroes_start = log_state("start replace zeroes", job_context["job"].id)
    del row_filtered_matrix
    # # Visualize Row and Column Filtered
    # output_path = job_context['output_dir'] + "row_col_filtered_" + str(time.time()) + ".png"
    # visualized_rowcolfilter = visualize.visualize(row_col_filtered_matrix_samples.copy(),
    #                                               output_path)
    # "Reset" zero values that were set to NA in RNA-seq samples
    # (i.e., make these zero again) in combined_matrix
    # NOTE(review): cached_zeroes is only assigned inside the RNA-Seq branch
    # above; on the microarray-only path this loop would raise NameError --
    # confirm whether that path is ever exercised.
    for column in cached_zeroes.keys():
        zeroes = cached_zeroes[column]
        # Skip purged columns
        if column not in row_col_filtered_matrix_samples:
            continue
        # Place the zero
        try:
            # This generates a warning, so use loc[] instead
            # row_col_filtered_matrix_samples[column].replace(zeroes, 0.0, inplace=True)
            zeroes_list = zeroes.tolist()
            new_index_list = row_col_filtered_matrix_samples_index.tolist()
            new_zeroes = list(set(new_index_list) & set(zeroes_list))
            row_col_filtered_matrix_samples[column].loc[new_zeroes] = 0.0
        except Exception as e:
            logger.warn("Error when replacing zero")
            continue
    log_state("end replace zeroes", job_context["job"].id, replace_zeroes_start)
    transposed_zeroes_start = log_state("start replacing transposed zeroes", job_context["job"].id)
    # Label our new replaced data
    combined_matrix_zero = row_col_filtered_matrix_samples
    del row_col_filtered_matrix_samples
    transposed_matrix_with_zeros = combined_matrix_zero.T
    del combined_matrix_zero
    # Remove -inf and inf
    # This should never happen, but make sure it doesn't!
    transposed_matrix = transposed_matrix_with_zeros.replace([np.inf, -np.inf], np.nan)
    del transposed_matrix_with_zeros
    log_state("end replacing transposed zeroes", job_context["job"].id, transposed_zeroes_start)
    # Store the absolute/percentages of imputed values
    matrix_sum = transposed_matrix.isnull().sum()
    percent = (matrix_sum / transposed_matrix.isnull().count()).sort_values(ascending=False)
    total_percent_imputed = sum(percent) / len(transposed_matrix.count())
    job_context['total_percent_imputed'] = total_percent_imputed
    logger.info("Total percentage of data to impute!", total_percent_imputed=total_percent_imputed)
    # Perform imputation of missing values with IterativeSVD (rank=10) on the
    # transposed_matrix; imputed_matrix
    svd_algorithm = job_context['dataset'].svd_algorithm
    if svd_algorithm != 'NONE':
        svd_start = log_state("start SVD", job_context["job"].id)
        logger.info("IterativeSVD algorithm: %s" % svd_algorithm)
        svd_algorithm = str.lower(svd_algorithm)
        imputed_matrix = IterativeSVD(
            rank=10,
            svd_algorithm=svd_algorithm
        ).fit_transform(transposed_matrix)
        svd_start = log_state("end SVD", job_context["job"].id, svd_start)
    else:
        imputed_matrix = transposed_matrix
        logger.info("Skipping IterativeSVD")
    del transposed_matrix
    untranspose_start = log_state("start untranspose", job_context["job"].id)
    # Untranspose imputed_matrix (genes are now rows, samples are now columns)
    untransposed_imputed_matrix = imputed_matrix.T
    del imputed_matrix
    # Convert back to Pandas
    untransposed_imputed_matrix_df = pd.DataFrame.from_records(untransposed_imputed_matrix)
    untransposed_imputed_matrix_df.index = row_col_filtered_matrix_samples_index
    untransposed_imputed_matrix_df.columns = row_col_filtered_matrix_samples_columns
    del untransposed_imputed_matrix
    del row_col_filtered_matrix_samples_index
    del row_col_filtered_matrix_samples_columns
    # Quantile normalize imputed_matrix where genes are rows and samples are columns
    job_context['organism'] = Organism.get_object_for_name(job_context['organism_name'])
    job_context['merged_no_qn'] = untransposed_imputed_matrix_df
    # output_path = job_context['output_dir'] + "compendia_no_qn_" + str(time.time()) + ".png"
    # visualized_merged_no_qn = visualize.visualize(untransposed_imputed_matrix_df.copy(),
    #                                               output_path)
    log_state("end untranspose", job_context["job"].id, untranspose_start)
    quantile_start = log_state("start quantile normalize", job_context["job"].id)
    # Perform the Quantile Normalization
    job_context = smashing_utils.quantile_normalize(job_context, ks_check=False)
    log_state("end quantile normalize", job_context["job"].id, quantile_start)
    # Visualize Final Compendia
    # output_path = job_context['output_dir'] + "compendia_with_qn_" + str(time.time()) + ".png"
    # visualized_merged_qn = visualize.visualize(job_context['merged_qn'].copy(), output_path)
    job_context['time_end'] = timezone.now()
    # NOTE(review): formatted_command is a plain str here; downstream code
    # that calls " ".join() on it would join per-character. Other versions
    # of this pipeline store a list -- confirm the intended type.
    job_context['formatted_command'] = "create_compendia.py"
    log_state("end prepare imputation", job_context["job"].id, imputation_start)
    return job_context
def _create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Creates the ComputationalResult and its annotation, zips the output
    directory together with README/LICENSE files, records the
    CompendiumResult (plus per-organism associations), and uploads the
    archive to S3.
    """
    result_start = log_state("start create result object", job_context["job"].id)
    result = ComputationalResult()
    # `formatted_command` may be a plain string or a list of argv-style
    # tokens depending on pipeline version; calling " ".join() on a string
    # would space-separate its characters, so normalize here.
    formatted_command = job_context['formatted_command']
    if isinstance(formatted_command, str):
        result.commands.append(formatted_command)
    else:
        result.commands.append(" ".join(formatted_command))
    result.is_ccdl = True
    # Temporary until we re-enable the QN test step.
    result.is_public = False
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_COMPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    # The samples dict was collapsed to a single (primary-organism) key in
    # _prepare_input, so this grabs that organism's sample list.
    organism_key = list(job_context['samples'].keys())[0]
    annotation = ComputationalResultAnnotation()
    annotation.result = result
    annotation.data = {
        "organism_id": job_context['samples'][organism_key][0].organism_id,
        "organism_name": job_context['organism_name'],
        "is_qn": False,
        "is_compendia": True,
        "samples": [sample.accession_code for sample in job_context["samples"][organism_key]],
        "num_samples": len(job_context["samples"][organism_key]),
        "experiment_accessions": [e.accession_code for e in job_context['experiments']],
        "total_percent_imputed": job_context['total_percent_imputed']
    }
    annotation.save()
    # Create the resulting archive
    final_zip_base = SMASHING_DIR + str(job_context["dataset"].pk) + "_compendia"
    # Copy LICENSE.txt and correct README.md files.
    if job_context["dataset"].quant_sf_only:
        readme_file = "/home/user/README_QUANT.md"
    else:
        readme_file = "/home/user/README_NORMALIZED.md"
    shutil.copy(readme_file, job_context["output_dir"] + "/README.md")
    shutil.copy("/home/user/LICENSE_DATASET.txt", job_context["output_dir"] + "/LICENSE.TXT")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.save()
    # Compendia Result Helpers
    primary_organism = Organism.get_object_for_name(job_context['primary_organism'])
    organisms = [Organism.get_object_for_name(organism) for organism in job_context["all_organisms"]]
    # NOTE(review): the version counter always filters on
    # quant_sf_only=False, even for quant-only datasets -- confirm intended.
    compendium_version = CompendiumResult.objects.filter(
        primary_organism=primary_organism,
        quant_sf_only=False
    ).count() + 1
    # Save Compendia Result
    compendium_result = CompendiumResult()
    compendium_result.quant_sf_only = job_context["dataset"].quant_sf_only
    compendium_result.svd_algorithm = job_context['dataset'].svd_algorithm
    compendium_result.compendium_version = compendium_version
    compendium_result.result = result
    compendium_result.primary_organism = primary_organism
    compendium_result.save()
    # create relations to all organisms contained in the compendia
    compendium_result_organism_associations = []
    for compendium_organism in organisms:
        compendium_result_organism_association = CompendiumResultOrganismAssociation()
        compendium_result_organism_association.compendium_result = compendium_result
        compendium_result_organism_association.organism = compendium_organism
        compendium_result_organism_associations.append(
            compendium_result_organism_association)
    CompendiumResultOrganismAssociation.objects.bulk_create(
        compendium_result_organism_associations)
    job_context['compendium_result'] = compendium_result
    logger.info("Compendium created!",
                archive_path=archive_path,
                organism_name=job_context['organism_name'])
    # Upload the result to S3
    timestamp = str(int(time.time()))
    key = job_context['organism_name'] + "_" + str(compendium_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, key)
    job_context['result'] = result
    job_context['computed_files'] = [archive_computed_file]
    job_context['success'] = True
    log_state("end create result object", job_context["job"].id, result_start)
    # TEMPORARY for iterating on compendia more quickly.
    # Reset this so the end_job does clean up the job's non-input-data stuff.
    job_context["work_dir"] = job_context["old_work_dir"]
    return job_context
def create_compendia(job_id: int) -> None:
    """Entry point: run the full create-compendia pipeline for one job."""
    pipeline = Pipeline(name=PipelineEnum.CREATE_COMPENDIA.value)
    steps = [
        utils.start_job,
        _prepare_input,
        _prepare_frames,
        _perform_imputation,
        smashing_utils.write_non_data_files,
        _create_result_objects,
        utils.end_job,
    ]
    return utils.run_pipeline({"job_id": job_id, "pipeline": pipeline}, steps)
<|code_end|>
| workers/data_refinery_workers/processors/create_compendia.py
<|code_start|>import logging
import os
import shutil
import time
import itertools
from typing import Dict
from django.utils import timezone
from django.db.models import Q, Count
from fancyimpute import IterativeSVD
import numpy as np
import pandas as pd
import psutil
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
CompendiumResultOrganismAssociation,
ComputedFile,
Organism,
Pipeline)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
# Silence pandas chained-assignment warnings; this module intentionally
# assigns into DataFrame slices via loc[] (see _perform_imputation).
pd.set_option('mode.chained_assignment', None)
# Destination bucket for the finished compendium archive.
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
BYTES_IN_GB = 1024 * 1024 * 1024  # divisor for reporting RSS in log_state
# Shared scratch space where per-organism work dirs and zips are written.
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
### DEBUG ###
# Force DEBUG level so log_state's CPU/memory snapshots are emitted.
logger.setLevel(logging.getLevelName('DEBUG'))
def log_state(message, job_id, start_time=False):
    """Log a debug snapshot of CPU and RSS usage for this job.

    When ``start_time`` is falsy, returns ``time.time()`` so the caller
    can pass it back later to get a duration line; when ``start_time``
    is supplied, logs the elapsed duration instead. No-op (returns None)
    unless DEBUG logging is enabled.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return None
    rss_gb = psutil.Process(os.getpid()).memory_info().rss / BYTES_IN_GB
    logger.debug(message,
                 total_cpu=psutil.cpu_percent(),
                 process_ram=rss_gb,
                 job_id=job_id)
    if not start_time:
        return time.time()
    logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
    return None
def _prepare_input(job_context: Dict) -> Dict:
    """Collapse all samples under the dominant organism and stage their files.

    Picks the organism with the most samples as the primary one, merges
    every organism's samples under that single key, then delegates file
    preparation to smashing_utils.prepare_files.
    """
    timer = log_state("prepare input", job_context["job"].id)
    samples_by_organism = job_context["samples"]
    # The "primary" organism is the one contributing the most samples.
    primary = max(samples_by_organism, key=lambda o: len(samples_by_organism[o]))
    job_context["primary_organism"] = primary
    job_context["all_organisms"] = samples_by_organism.keys()
    merged_samples = list(itertools.chain(*samples_by_organism.values()))
    job_context["samples"] = {primary: merged_samples}
    job_context = smashing_utils.prepare_files(job_context)
    # Compendia jobs only run for one organism, so the single group key is
    # the organism name -- unless prepare_files has already failed the job.
    if job_context['job'].success is not False:
        organism_name = job_context['group_by_keys'][0]
        job_context["organism_name"] = organism_name
        # TEMPORARY for iterating on compendia more quickly: reuse one
        # directory per organism rather than re-downloading from S3 each run.
        job_context["old_work_dir"] = job_context["work_dir"]
        job_context["work_dir"] = SMASHING_DIR + organism_name + "/"
        if not os.path.exists(job_context["work_dir"]):
            os.makedirs(job_context["work_dir"])
    log_state("prepare input done", job_context["job"].id, timer)
    return job_context
def _prepare_frames(job_context: Dict) -> Dict:
    """Load every input file into data frames, then mark the dataset successful.

    Pops 'input_files' from the context and hands each key's files to
    smashing_utils.process_frames_for_key; any failure is converted to a
    ProcessorJobError carrying the job/dataset identifiers.
    """
    timer = log_state("start _prepare_frames", job_context["job"].id)
    job_context['unsmashable_files'] = []
    job_context['num_samples'] = 0
    # Smash all of the sample sets
    logger.debug("About to smash!",
                 dataset_count=len(job_context['dataset'].data),
                 job_id=job_context['job'].id)
    try:
        # `key` is either a species name or an experiment accession.
        for key, files_for_key in job_context.pop('input_files').items():
            job_context = smashing_utils.process_frames_for_key(key, files_for_key, job_context)
        # if len(job_context['all_frames']) < 1:
        # TODO: Enable this check?
    except Exception:
        raise utils.ProcessorJobError("Could not prepare frames for compendia.",
                                      success=False,
                                      dataset_id=job_context['dataset'].id,
                                      processor_job_id=job_context['job_id'],
                                      num_input_files=job_context['num_input_files'])
    job_context['dataset'].success = True
    job_context['dataset'].save()
    log_state("end _prepare_frames", job_context["job"].id, timer)
    return job_context
def _perform_imputation(job_context: Dict) -> Dict:
    """
    Take the inputs and perform the primary imputation.
    Via https://github.com/AlexsLemonade/refinebio/issues/508#issuecomment-435879283:
    - Combine all microarray samples with a full join to form a
      microarray_expression_matrix (this may end up being a DataFrame).
    - Combine all RNA-seq samples (lengthScaledTPM) with a full outer join
      to form a rnaseq_expression_matrix.
    - Calculate the sum of the lengthScaledTPM values for each row (gene) of
      the rnaseq_expression_matrix (rnaseq_row_sums).
    - Calculate the 10th percentile of rnaseq_row_sums
    - Drop all rows in rnaseq_expression_matrix with a row sum < 10th percentile of
      rnaseq_row_sums; this is now filtered_rnaseq_matrix
    - log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
    - Set all zero values in log2_rnaseq_matrix to NA, but make sure to keep track of
      where these zeroes are
    - Perform a full outer join of microarray_expression_matrix and
      log2_rnaseq_matrix; combined_matrix
    - Remove genes (rows) with >30% missing values in combined_matrix
    - Remove samples (columns) with >50% missing values in combined_matrix
    - "Reset" zero values that were set to NA in RNA-seq samples (i.e., make these zero
      again) in combined_matrix
    - Transpose combined_matrix; transposed_matrix
    - Perform imputation of missing values with IterativeSVD (rank=10) on
      the transposed_matrix; imputed_matrix
      -- with specified svd algorithm or skip
    - Untranspose imputed_matrix (genes are now rows, samples are now columns)
    - Quantile normalize imputed_matrix where genes are rows and samples are columns
    """
    imputation_start = log_state("start perform imputation", job_context["job"].id)
    job_context['time_start'] = timezone.now()
    rnaseq_row_sums_start = log_state("start rnaseq row sums", job_context["job"].id)
    # We potentially can have a microarray-only compendia but not a RNASeq-only compendia
    log2_rnaseq_matrix = None
    if job_context['rnaseq_matrix'] is not None:
        # Drop any genes that are entirely NULL in the RNA-Seq matrix
        job_context['rnaseq_matrix'] = job_context['rnaseq_matrix'].dropna(axis='columns',
                                                                           how='all')
        # Calculate the sum of the lengthScaledTPM values for each row
        # (gene) of the rnaseq_matrix (rnaseq_row_sums)
        rnaseq_row_sums = np.sum(job_context['rnaseq_matrix'], axis=1)
        log_state("end rnaseq row sums", job_context["job"].id, rnaseq_row_sums_start)
        rnaseq_decile_start = log_state("start rnaseq decile", job_context["job"].id)
        # Calculate the 10th percentile of rnaseq_row_sums
        rnaseq_tenth_percentile = np.percentile(rnaseq_row_sums, 10)
        log_state("end rnaseq decile", job_context["job"].id, rnaseq_decile_start)
        drop_start = log_state("drop all rows", job_context["job"].id)
        # Drop all rows in rnaseq_matrix with a row sum < 10th
        # percentile of rnaseq_row_sums; this is now
        # filtered_rnaseq_matrix
        # TODO: there is probably a better way to do this with `np.where`
        rows_to_filter = []
        for (x, sum_val) in rnaseq_row_sums.items():
            if sum_val < rnaseq_tenth_percentile:
                rows_to_filter.append(x)
        del rnaseq_row_sums
        log_state("actually calling drop()", job_context["job"].id)
        filtered_rnaseq_matrix = job_context.pop('rnaseq_matrix').drop(rows_to_filter)
        del rows_to_filter
        log_state("end drop all rows", job_context["job"].id, drop_start)
        log2_start = log_state("start log2", job_context["job"].id)
        # log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
        filtered_rnaseq_matrix_plus_one = filtered_rnaseq_matrix + 1
        log2_rnaseq_matrix = np.log2(filtered_rnaseq_matrix_plus_one)
        del filtered_rnaseq_matrix_plus_one
        del filtered_rnaseq_matrix
        log_state("end log2", job_context["job"].id, log2_start)
        cache_start = log_state("start caching zeroes", job_context["job"].id)
        # Cache our RNA-Seq zero values
        cached_zeroes = {}
        for column in log2_rnaseq_matrix.columns:
            cached_zeroes[column] = log2_rnaseq_matrix.index[np.where(log2_rnaseq_matrix[column] == 0)]
        # Set all zero values in log2_rnaseq_matrix to NA, but make sure
        # to keep track of where these zeroes are
        log2_rnaseq_matrix[log2_rnaseq_matrix == 0] = np.nan
        log_state("end caching zeroes", job_context["job"].id, cache_start)
    outer_merge_start = log_state("start outer merge", job_context["job"].id)
    # Perform a full outer join of microarray_matrix and
    # log2_rnaseq_matrix; combined_matrix
    if log2_rnaseq_matrix is not None:
        combined_matrix = job_context.pop('microarray_matrix').merge(log2_rnaseq_matrix,
                                                                     how='outer',
                                                                     left_index=True,
                                                                     right_index=True)
    else:
        logger.info("Building compendia with only microarray data.", job_id=job_context["job"].id)
        combined_matrix = job_context.pop('microarray_matrix')
    log_state("ran outer merge, now deleteing log2_rnaseq_matrix", job_context["job"].id)
    del log2_rnaseq_matrix
    log_state("end outer merge", job_context["job"].id, outer_merge_start)
    drop_na_genes_start = log_state("start drop NA genes", job_context["job"].id)
    # # Visualize Prefiltered
    # output_path = job_context['output_dir'] + "pre_filtered_" + str(time.time()) + ".png"
    # visualized_prefilter = visualize.visualize(combined_matrix.copy(), output_path)
    # Remove genes (rows) with <=70% present values in combined_matrix
    thresh = combined_matrix.shape[1] * .7 # (Rows, Columns)
    # Everything below `thresh` is dropped
    row_filtered_matrix = combined_matrix.dropna(axis='index',
                                                 thresh=thresh)
    del combined_matrix
    del thresh
    log_state("end drop NA genes", job_context["job"].id, drop_na_genes_start)
    drop_na_samples_start = log_state("start drop NA samples", job_context["job"].id)
    # # Visualize Row Filtered
    # output_path = job_context['output_dir'] + "row_filtered_" + str(time.time()) + ".png"
    # visualized_rowfilter = visualize.visualize(row_filtered_matrix.copy(), output_path)
    # Remove samples (columns) with <50% present values in combined_matrix
    # XXX: Find better test data for this!
    col_thresh = row_filtered_matrix.shape[0] * .5
    row_col_filtered_matrix_samples = row_filtered_matrix.dropna(axis='columns',
                                                                 thresh=col_thresh)
    row_col_filtered_matrix_samples_index = row_col_filtered_matrix_samples.index
    row_col_filtered_matrix_samples_columns = row_col_filtered_matrix_samples.columns
    # NOTE(review): label says "genes" but this timer covers the sample drop.
    log_state("end drop NA genes", job_context["job"].id, drop_na_samples_start)
    replace_zeroes_start = log_state("start replace zeroes", job_context["job"].id)
    del row_filtered_matrix
    # # Visualize Row and Column Filtered
    # output_path = job_context['output_dir'] + "row_col_filtered_" + str(time.time()) + ".png"
    # visualized_rowcolfilter = visualize.visualize(row_col_filtered_matrix_samples.copy(),
    #                                               output_path)
    # "Reset" zero values that were set to NA in RNA-seq samples
    # (i.e., make these zero again) in combined_matrix
    # NOTE(review): cached_zeroes is only assigned inside the RNA-Seq branch
    # above; on the microarray-only path this loop would raise NameError --
    # confirm whether that path is ever exercised.
    for column in cached_zeroes.keys():
        zeroes = cached_zeroes[column]
        # Skip purged columns
        if column not in row_col_filtered_matrix_samples:
            continue
        # Place the zero
        try:
            # This generates a warning, so use loc[] instead
            # row_col_filtered_matrix_samples[column].replace(zeroes, 0.0, inplace=True)
            zeroes_list = zeroes.tolist()
            new_index_list = row_col_filtered_matrix_samples_index.tolist()
            new_zeroes = list(set(new_index_list) & set(zeroes_list))
            row_col_filtered_matrix_samples[column].loc[new_zeroes] = 0.0
        except Exception as e:
            logger.warn("Error when replacing zero")
            continue
    log_state("end replace zeroes", job_context["job"].id, replace_zeroes_start)
    transposed_zeroes_start = log_state("start replacing transposed zeroes", job_context["job"].id)
    # Label our new replaced data
    combined_matrix_zero = row_col_filtered_matrix_samples
    del row_col_filtered_matrix_samples
    transposed_matrix_with_zeros = combined_matrix_zero.T
    del combined_matrix_zero
    # Remove -inf and inf
    # This should never happen, but make sure it doesn't!
    transposed_matrix = transposed_matrix_with_zeros.replace([np.inf, -np.inf], np.nan)
    del transposed_matrix_with_zeros
    log_state("end replacing transposed zeroes", job_context["job"].id, transposed_zeroes_start)
    # Store the absolute/percentages of imputed values
    matrix_sum = transposed_matrix.isnull().sum()
    percent = (matrix_sum / transposed_matrix.isnull().count()).sort_values(ascending=False)
    total_percent_imputed = sum(percent) / len(transposed_matrix.count())
    job_context['total_percent_imputed'] = total_percent_imputed
    logger.info("Total percentage of data to impute!", total_percent_imputed=total_percent_imputed)
    # Perform imputation of missing values with IterativeSVD (rank=10) on the
    # transposed_matrix; imputed_matrix
    svd_algorithm = job_context['dataset'].svd_algorithm
    if svd_algorithm != 'NONE':
        svd_start = log_state("start SVD", job_context["job"].id)
        logger.info("IterativeSVD algorithm: %s" % svd_algorithm)
        svd_algorithm = str.lower(svd_algorithm)
        imputed_matrix = IterativeSVD(
            rank=10,
            svd_algorithm=svd_algorithm
        ).fit_transform(transposed_matrix)
        svd_start = log_state("end SVD", job_context["job"].id, svd_start)
    else:
        imputed_matrix = transposed_matrix
        logger.info("Skipping IterativeSVD")
    del transposed_matrix
    untranspose_start = log_state("start untranspose", job_context["job"].id)
    # Untranspose imputed_matrix (genes are now rows, samples are now columns)
    untransposed_imputed_matrix = imputed_matrix.T
    del imputed_matrix
    # Convert back to Pandas
    untransposed_imputed_matrix_df = pd.DataFrame.from_records(untransposed_imputed_matrix)
    untransposed_imputed_matrix_df.index = row_col_filtered_matrix_samples_index
    untransposed_imputed_matrix_df.columns = row_col_filtered_matrix_samples_columns
    del untransposed_imputed_matrix
    del row_col_filtered_matrix_samples_index
    del row_col_filtered_matrix_samples_columns
    # Quantile normalize imputed_matrix where genes are rows and samples are columns
    job_context['organism'] = Organism.get_object_for_name(job_context['organism_name'])
    job_context['merged_no_qn'] = untransposed_imputed_matrix_df
    # output_path = job_context['output_dir'] + "compendia_no_qn_" + str(time.time()) + ".png"
    # visualized_merged_no_qn = visualize.visualize(untransposed_imputed_matrix_df.copy(),
    #                                               output_path)
    log_state("end untranspose", job_context["job"].id, untranspose_start)
    quantile_start = log_state("start quantile normalize", job_context["job"].id)
    # Perform the Quantile Normalization
    job_context = smashing_utils.quantile_normalize(job_context, ks_check=False)
    log_state("end quantile normalize", job_context["job"].id, quantile_start)
    # Visualize Final Compendia
    # output_path = job_context['output_dir'] + "compendia_with_qn_" + str(time.time()) + ".png"
    # visualized_merged_qn = visualize.visualize(job_context['merged_qn'].copy(), output_path)
    job_context['time_end'] = timezone.now()
    # List form so _create_result_objects can " ".join() it into a command string.
    job_context['formatted_command'] = ["create_compendia.py"]
    log_state("end prepare imputation", job_context["job"].id, imputation_start)
    return job_context
def _create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Also writes the quantile-normalized frame to a TSV, zips the output
    directory together with README/LICENSE files, records the
    CompendiumResult (plus per-organism associations), and uploads the
    archive to S3.
    """
    result_start = log_state("start create result object", job_context["job"].id)
    result = ComputationalResult()
    result.commands.append(" ".join(job_context['formatted_command']))
    result.is_ccdl = True
    # Temporary until we re-enable the QN test step.
    result.is_public = False
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_COMPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    # Write the compendia dataframe to a file
    job_context['csv_outfile'] = job_context['output_dir'] + job_context['organism_name'] + '.tsv'
    job_context['merged_qn'].to_csv(job_context['csv_outfile'], sep='\t', encoding='utf-8')
    # The samples dict was collapsed to a single (primary-organism) key in
    # _prepare_input, so this grabs that organism's sample list.
    organism_key = list(job_context['samples'].keys())[0]
    annotation = ComputationalResultAnnotation()
    annotation.result = result
    annotation.data = {
        "organism_id": job_context['samples'][organism_key][0].organism_id,
        "organism_name": job_context['organism_name'],
        "is_qn": False,
        "is_compendia": True,
        "samples": [sample.accession_code for sample in job_context["samples"][organism_key]],
        "num_samples": len(job_context["samples"][organism_key]),
        "experiment_accessions": [e.accession_code for e in job_context['experiments']],
        "total_percent_imputed": job_context['total_percent_imputed']
    }
    annotation.save()
    # Create the resulting archive
    final_zip_base = SMASHING_DIR + str(job_context["dataset"].pk) + "_compendia"
    # Copy LICENSE.txt and correct README.md files.
    if job_context["dataset"].quant_sf_only:
        readme_file = "/home/user/README_QUANT.md"
    else:
        readme_file = "/home/user/README_NORMALIZED.md"
    shutil.copy(readme_file, job_context["output_dir"] + "/README.md")
    shutil.copy("/home/user/LICENSE_DATASET.txt", job_context["output_dir"] + "/LICENSE.TXT")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.save()
    # Compendia Result Helpers
    primary_organism = Organism.get_object_for_name(job_context['primary_organism'])
    organisms = [Organism.get_object_for_name(organism) for organism in job_context["all_organisms"]]
    # NOTE(review): the version counter filters on quant_sf_only=False even
    # when this dataset is quant-only -- confirm this is intended.
    compendium_version = CompendiumResult.objects.filter(
        primary_organism=primary_organism,
        quant_sf_only=False
    ).count() + 1
    # Save Compendia Result
    compendium_result = CompendiumResult()
    compendium_result.quant_sf_only = job_context["dataset"].quant_sf_only
    compendium_result.svd_algorithm = job_context['dataset'].svd_algorithm
    compendium_result.compendium_version = compendium_version
    compendium_result.result = result
    compendium_result.primary_organism = primary_organism
    compendium_result.save()
    # create relations to all organisms contained in the compendia
    compendium_result_organism_associations = []
    for compendium_organism in organisms:
        compendium_result_organism_association = CompendiumResultOrganismAssociation()
        compendium_result_organism_association.compendium_result = compendium_result
        compendium_result_organism_association.organism = compendium_organism
        compendium_result_organism_associations.append(
            compendium_result_organism_association)
    CompendiumResultOrganismAssociation.objects.bulk_create(
        compendium_result_organism_associations)
    job_context['compendium_result'] = compendium_result
    logger.info("Compendium created!",
                archive_path=archive_path,
                organism_name=job_context['organism_name'])
    # Upload the result to S3
    timestamp = str(int(time.time()))
    key = job_context['organism_name'] + "_" + str(compendium_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, key)
    job_context['result'] = result
    job_context['computed_files'] = [archive_computed_file]
    job_context['success'] = True
    log_state("end create result object", job_context["job"].id, result_start)
    # TEMPORARY for iterating on compendia more quickly.
    # Reset this so the end_job does clean up the job's non-input-data stuff.
    job_context["work_dir"] = job_context["old_work_dir"]
    return job_context
def create_compendia(job_id: int) -> dict:
    """Run the full compendia pipeline for ProcessorJob *job_id*.

    Returns the final job context dict produced by utils.run_pipeline.
    """
    # TODO(review): COMPENDIA processor jobs end up with a NULL nomad_job_id
    # because they are dispatched directly by Nomad rather than queued via
    # send_job -- consider backfilling it here (e.g. from the NOMAD_JOB_ID
    # environment variable) or in utils.start_job.
    pipeline = Pipeline(name=PipelineEnum.CREATE_COMPENDIA.value)
    job_context = utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
                                     [utils.start_job,
                                      _prepare_input,
                                      _prepare_frames,
                                      _perform_imputation,
                                      smashing_utils.write_non_data_files,
                                      _create_result_objects,
                                      utils.end_job])
    return job_context
<|code_end|>
|
Record which samples end up making it into a compendia
### Context
As we build compendia we drop some samples that aren't suitable for various reasons. However the only record in our database that tracks what samples are in it is the dataset object which has every sample including those that get dropped.
### Problem or idea
We should record in the database which samples were actually retained in each compendium, rather than only the full requested sample list on the dataset.
### Solution or next step
We should already have this information in the compendia's data because during the first pass over the samples we keep track of the gene ids, the microarray samples, and the rnaseq samples. It's just a matter of actually getting this into the database.
| workers/data_refinery_workers/processors/create_compendia.py
<|code_start|>import logging
import os
import shutil
import time
import itertools
from typing import Dict
from django.utils import timezone
from django.db.models import Q, Count
from fancyimpute import IterativeSVD
import numpy as np
import pandas as pd
import psutil
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
CompendiumResultOrganismAssociation,
ComputedFile,
Organism,
Pipeline)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
pd.set_option('mode.chained_assignment', None)
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
BYTES_IN_GB = 1024 * 1024 * 1024
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
### DEBUG ###
logger.setLevel(logging.getLevelName('DEBUG'))
def log_state(message, job_id, start_time=False):
    """Log *message* with current CPU and RAM usage at DEBUG level.

    When *start_time* is falsy, returns the current timestamp so the caller
    can pass it back later; when truthy, additionally logs the elapsed
    duration since *start_time*. Returns None (and does nothing) when DEBUG
    logging is disabled.
    """
    if logger.isEnabledFor(logging.DEBUG):
        process = psutil.Process(os.getpid())
        ram_in_GB = process.memory_info().rss / BYTES_IN_GB
        logger.debug(message,
                     total_cpu=psutil.cpu_percent(),
                     process_ram=ram_in_GB,
                     job_id=job_id)
        if start_time:
            logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
        else:
            return time.time()
def _prepare_input(job_context: Dict) -> Dict:
    """Flatten the per-organism sample groups and prepare the input files.

    Re-keys job_context['samples'] so every sample lives under the organism
    that contributed the most samples (the "primary" organism), then
    delegates to smashing_utils.prepare_files. Also swaps in a stable
    per-organism work_dir (a TEMPORARY hack) so repeated runs reuse
    already-downloaded data; the original work_dir is stashed under
    'old_work_dir' so it can be restored before end_job cleanup.
    """
    start_time = log_state("prepare input", job_context["job"].id)
    # The primary organism is whichever group has the most samples.
    job_context["primary_organism"] = max(job_context["samples"],
                                          key=lambda organism:len(job_context["samples"][organism]))
    job_context["all_organisms"] = job_context["samples"].keys()
    all_samples = list(itertools.chain(*job_context["samples"].values()))
    job_context["samples"] = {job_context["primary_organism"]: all_samples}
    job_context = smashing_utils.prepare_files(job_context)
    # Compendia jobs only run for one organism, so we know the only
    # key will be the organism name, unless of course we've already failed.
    if job_context['job'].success is not False:
        job_context["organism_name"] = job_context['group_by_keys'][0]
        # TEMPORARY for iterating on compendia more quickly. Rather
        # than downloading the data from S3 each run we're just gonna
        # use the same directory every job.
        job_context["old_work_dir"] = job_context["work_dir"]
        job_context["work_dir"] = SMASHING_DIR + job_context["organism_name"] + "/"
        if not os.path.exists(job_context["work_dir"]):
            os.makedirs(job_context["work_dir"])
    log_state("prepare input done", job_context["job"].id, start_time)
    return job_context
def _prepare_frames(job_context: Dict) -> Dict:
    """Load every input file group into dataframes on the job context.

    Delegates to smashing_utils.process_frames_for_key for each group
    (keyed by species name or experiment accession) and marks the dataset
    successful once all frames have been read.

    Raises:
        utils.ProcessorJobError: if any group fails to process; the original
        exception is chained so its traceback is preserved.
    """
    start_prepare_frames = log_state("start _prepare_frames", job_context["job"].id)
    job_context['unsmashable_files'] = []
    job_context['num_samples'] = 0
    # Smash all of the sample sets
    logger.debug("About to smash!",
                 dataset_count=len(job_context['dataset'].data),
                 job_id=job_context['job'].id)
    try:
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop('input_files').items():
            job_context = smashing_utils.process_frames_for_key(key, input_files, job_context)
        # if len(job_context['all_frames']) < 1:
        # TODO: Enable this check?
    except Exception as e:
        # Chain the original exception so the root cause isn't lost.
        raise utils.ProcessorJobError("Could not prepare frames for compendia.",
                                      success=False,
                                      dataset_id=job_context['dataset'].id,
                                      processor_job_id=job_context['job_id'],
                                      num_input_files=job_context['num_input_files']) from e
    job_context['dataset'].success = True
    job_context['dataset'].save()
    log_state("end _prepare_frames", job_context["job"].id, start_prepare_frames)
    return job_context
def _perform_imputation(job_context: Dict) -> Dict:
    """
    Take the inputs and perform the primary imputation.

    Via https://github.com/AlexsLemonade/refinebio/issues/508#issuecomment-435879283:
    - Combine all microarray samples with a full join to form a
      microarray_expression_matrix (this may end up being a DataFrame).
    - Combine all RNA-seq samples (lengthScaledTPM) with a full outer join
      to form a rnaseq_expression_matrix.
    - Calculate the sum of the lengthScaledTPM values for each row (gene) of
      the rnaseq_expression_matrix (rnaseq_row_sums).
    - Calculate the 10th percentile of rnaseq_row_sums
    - Drop all rows in rnaseq_expression_matrix with a row sum < 10th percentile of
      rnaseq_row_sums; this is now filtered_rnaseq_matrix
    - log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
    - Set all zero values in log2_rnaseq_matrix to NA, but make sure to keep track of
      where these zeroes are
    - Perform a full outer join of microarray_expression_matrix and
      log2_rnaseq_matrix; combined_matrix
    - Remove genes (rows) with >30% missing values in combined_matrix
    - Remove samples (columns) with >50% missing values in combined_matrix
    - "Reset" zero values that were set to NA in RNA-seq samples (i.e., make these zero
      again) in combined_matrix
    - Transpose combined_matrix; transposed_matrix
    - Perform imputation of missing values with IterativeSVD (rank=10) on
      the transposed_matrix; imputed_matrix
      -- with specified svd algorithm or skip
    - Untranspose imputed_matrix (genes are now rows, samples are now columns)
    - Quantile normalize imputed_matrix where genes are rows and samples are columns
    """
    imputation_start = log_state("start perform imputation", job_context["job"].id)
    job_context['time_start'] = timezone.now()
    rnaseq_row_sums_start = log_state("start rnaseq row sums", job_context["job"].id)
    # We potentially can have a microarray-only compendia but not a RNASeq-only compendia
    log2_rnaseq_matrix = None
    # BUGFIX: initialize outside the RNA-seq branch. Previously this was only
    # assigned inside `if job_context['rnaseq_matrix'] is not None`, so a
    # microarray-only compendium crashed with a NameError when iterating
    # cached_zeroes below.
    cached_zeroes = {}
    if job_context['rnaseq_matrix'] is not None:
        # Drop any genes that are entirely NULL in the RNA-Seq matrix
        job_context['rnaseq_matrix'] = job_context['rnaseq_matrix'].dropna(axis='columns',
                                                                           how='all')
        # Calculate the sum of the lengthScaledTPM values for each row
        # (gene) of the rnaseq_matrix (rnaseq_row_sums)
        rnaseq_row_sums = np.sum(job_context['rnaseq_matrix'], axis=1)
        log_state("end rnaseq row sums", job_context["job"].id, rnaseq_row_sums_start)
        rnaseq_decile_start = log_state("start rnaseq decile", job_context["job"].id)
        # Calculate the 10th percentile of rnaseq_row_sums
        rnaseq_tenth_percentile = np.percentile(rnaseq_row_sums, 10)
        log_state("end rnaseq decile", job_context["job"].id, rnaseq_decile_start)
        drop_start = log_state("drop all rows", job_context["job"].id)
        # Drop all rows in rnaseq_matrix with a row sum < 10th
        # percentile of rnaseq_row_sums; this is now
        # filtered_rnaseq_matrix
        # TODO: This is probably a better way to do this with `np.where`
        rows_to_filter = []
        for (x, sum_val) in rnaseq_row_sums.items():
            if sum_val < rnaseq_tenth_percentile:
                rows_to_filter.append(x)
        del rnaseq_row_sums
        log_state("actually calling drop()", job_context["job"].id)
        filtered_rnaseq_matrix = job_context.pop('rnaseq_matrix').drop(rows_to_filter)
        del rows_to_filter
        log_state("end drop all rows", job_context["job"].id, drop_start)
        log2_start = log_state("start log2", job_context["job"].id)
        # log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
        filtered_rnaseq_matrix_plus_one = filtered_rnaseq_matrix + 1
        log2_rnaseq_matrix = np.log2(filtered_rnaseq_matrix_plus_one)
        del filtered_rnaseq_matrix_plus_one
        del filtered_rnaseq_matrix
        log_state("end log2", job_context["job"].id, log2_start)
        cache_start = log_state("start caching zeroes", job_context["job"].id)
        # Cache our RNA-Seq zero values
        for column in log2_rnaseq_matrix.columns:
            cached_zeroes[column] = log2_rnaseq_matrix.index[np.where(log2_rnaseq_matrix[column] == 0)]
        # Set all zero values in log2_rnaseq_matrix to NA, but make sure
        # to keep track of where these zeroes are
        log2_rnaseq_matrix[log2_rnaseq_matrix == 0] = np.nan
        log_state("end caching zeroes", job_context["job"].id, cache_start)
    outer_merge_start = log_state("start outer merge", job_context["job"].id)
    # Perform a full outer join of microarray_matrix and
    # log2_rnaseq_matrix; combined_matrix
    if log2_rnaseq_matrix is not None:
        combined_matrix = job_context.pop('microarray_matrix').merge(log2_rnaseq_matrix,
                                                                     how='outer',
                                                                     left_index=True,
                                                                     right_index=True)
    else:
        logger.info("Building compendia with only microarray data.", job_id=job_context["job"].id)
        combined_matrix = job_context.pop('microarray_matrix')
    log_state("ran outer merge, now deleteing log2_rnaseq_matrix", job_context["job"].id)
    del log2_rnaseq_matrix
    log_state("end outer merge", job_context["job"].id, outer_merge_start)
    drop_na_genes_start = log_state("start drop NA genes", job_context["job"].id)
    # # Visualize Prefiltered
    # output_path = job_context['output_dir'] + "pre_filtered_" + str(time.time()) + ".png"
    # visualized_prefilter = visualize.visualize(combined_matrix.copy(), output_path)
    # Remove genes (rows) with <=70% present values in combined_matrix
    thresh = combined_matrix.shape[1] * .7  # (Rows, Columns)
    # Everything below `thresh` is dropped
    row_filtered_matrix = combined_matrix.dropna(axis='index',
                                                 thresh=thresh)
    del combined_matrix
    del thresh
    log_state("end drop NA genes", job_context["job"].id, drop_na_genes_start)
    drop_na_samples_start = log_state("start drop NA samples", job_context["job"].id)
    # # Visualize Row Filtered
    # output_path = job_context['output_dir'] + "row_filtered_" + str(time.time()) + ".png"
    # visualized_rowfilter = visualize.visualize(row_filtered_matrix.copy(), output_path)
    # Remove samples (columns) with <50% present values in combined_matrix
    # XXX: Find better test data for this!
    col_thresh = row_filtered_matrix.shape[0] * .5
    row_col_filtered_matrix_samples = row_filtered_matrix.dropna(axis='columns',
                                                                 thresh=col_thresh)
    row_col_filtered_matrix_samples_index = row_col_filtered_matrix_samples.index
    row_col_filtered_matrix_samples_columns = row_col_filtered_matrix_samples.columns
    log_state("end drop NA genes", job_context["job"].id, drop_na_samples_start)
    replace_zeroes_start = log_state("start replace zeroes", job_context["job"].id)
    del row_filtered_matrix
    # # Visualize Row and Column Filtered
    # output_path = job_context['output_dir'] + "row_col_filtered_" + str(time.time()) + ".png"
    # visualized_rowcolfilter = visualize.visualize(row_col_filtered_matrix_samples.copy(),
    #                                               output_path)
    # "Reset" zero values that were set to NA in RNA-seq samples
    # (i.e., make these zero again) in combined_matrix
    for column in cached_zeroes.keys():
        zeroes = cached_zeroes[column]
        # Skip purged columns
        if column not in row_col_filtered_matrix_samples:
            continue
        # Place the zero
        try:
            # This generates a warning, so use loc[] instead
            # row_col_filtered_matrix_samples[column].replace(zeroes, 0.0, inplace=True)
            zeroes_list = zeroes.tolist()
            new_index_list = row_col_filtered_matrix_samples_index.tolist()
            new_zeroes = list(set(new_index_list) & set(zeroes_list))
            row_col_filtered_matrix_samples[column].loc[new_zeroes] = 0.0
        except Exception as e:
            # Best-effort: a failed zero-reset shouldn't kill the whole job,
            # but do log the traceback instead of silently swallowing it.
            logger.warn("Error when replacing zero", exc_info=1)
            continue
    log_state("end replace zeroes", job_context["job"].id, replace_zeroes_start)
    transposed_zeroes_start = log_state("start replacing transposed zeroes", job_context["job"].id)
    # Label our new replaced data
    combined_matrix_zero = row_col_filtered_matrix_samples
    del row_col_filtered_matrix_samples
    transposed_matrix_with_zeros = combined_matrix_zero.T
    del combined_matrix_zero
    # Remove -inf and inf
    # This should never happen, but make sure it doesn't!
    transposed_matrix = transposed_matrix_with_zeros.replace([np.inf, -np.inf], np.nan)
    del transposed_matrix_with_zeros
    log_state("end replacing transposed zeroes", job_context["job"].id, transposed_zeroes_start)
    # Store the absolute/percentages of imputed values
    matrix_sum = transposed_matrix.isnull().sum()
    percent = (matrix_sum / transposed_matrix.isnull().count()).sort_values(ascending=False)
    total_percent_imputed = sum(percent) / len(transposed_matrix.count())
    job_context['total_percent_imputed'] = total_percent_imputed
    logger.info("Total percentage of data to impute!", total_percent_imputed=total_percent_imputed)
    # Perform imputation of missing values with IterativeSVD (rank=10) on the
    # transposed_matrix; imputed_matrix
    svd_algorithm = job_context['dataset'].svd_algorithm
    if svd_algorithm != 'NONE':
        svd_start = log_state("start SVD", job_context["job"].id)
        logger.info("IterativeSVD algorithm: %s" % svd_algorithm)
        svd_algorithm = str.lower(svd_algorithm)
        imputed_matrix = IterativeSVD(
            rank=10,
            svd_algorithm=svd_algorithm
        ).fit_transform(transposed_matrix)
        svd_start = log_state("end SVD", job_context["job"].id, svd_start)
    else:
        imputed_matrix = transposed_matrix
        logger.info("Skipping IterativeSVD")
    del transposed_matrix
    untranspose_start = log_state("start untranspose", job_context["job"].id)
    # Untranspose imputed_matrix (genes are now rows, samples are now columns)
    untransposed_imputed_matrix = imputed_matrix.T
    del imputed_matrix
    # Convert back to Pandas
    untransposed_imputed_matrix_df = pd.DataFrame.from_records(untransposed_imputed_matrix)
    untransposed_imputed_matrix_df.index = row_col_filtered_matrix_samples_index
    untransposed_imputed_matrix_df.columns = row_col_filtered_matrix_samples_columns
    del untransposed_imputed_matrix
    del row_col_filtered_matrix_samples_index
    del row_col_filtered_matrix_samples_columns
    # Quantile normalize imputed_matrix where genes are rows and samples are columns
    job_context['organism'] = Organism.get_object_for_name(job_context['organism_name'])
    job_context['merged_no_qn'] = untransposed_imputed_matrix_df
    # output_path = job_context['output_dir'] + "compendia_no_qn_" + str(time.time()) + ".png"
    # visualized_merged_no_qn = visualize.visualize(untransposed_imputed_matrix_df.copy(),
    #                                               output_path)
    log_state("end untranspose", job_context["job"].id, untranspose_start)
    quantile_start = log_state("start quantile normalize", job_context["job"].id)
    # Perform the Quantile Normalization
    job_context = smashing_utils.quantile_normalize(job_context, ks_check=False)
    log_state("end quantile normalize", job_context["job"].id, quantile_start)
    # Visualize Final Compendia
    # output_path = job_context['output_dir'] + "compendia_with_qn_" + str(time.time()) + ".png"
    # visualized_merged_qn = visualize.visualize(job_context['merged_qn'].copy(), output_path)
    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = ["create_compendia.py"]
    log_state("end prepare imputation", job_context["job"].id, imputation_start)
    return job_context
def _create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Writes the quantile-normalized matrix to a TSV, annotates the result,
    zips the output directory together with README/LICENSE, records a
    CompendiumResult (plus one organism-association row per organism), and
    uploads the archive to S3.
    """
    result_start = log_state("start create result object", job_context["job"].id)
    result = ComputationalResult()
    result.commands.append(" ".join(job_context['formatted_command']))
    result.is_ccdl = True
    # Temporary until we re-enable the QN test step.
    result.is_public = False
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_COMPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    # Write the compendia dataframe to a file
    job_context['csv_outfile'] = job_context['output_dir'] + job_context['organism_name'] + '.tsv'
    job_context['merged_qn'].to_csv(job_context['csv_outfile'], sep='\t', encoding='utf-8')
    organism_key = list(job_context['samples'].keys())[0]
    # NOTE(review): this "samples" list is everything under the job's single
    # group key, which may include samples dropped during filtering -- confirm
    # whether it should reflect only the retained samples.
    annotation = ComputationalResultAnnotation()
    annotation.result = result
    annotation.data = {
        "organism_id": job_context['samples'][organism_key][0].organism_id,
        "organism_name": job_context['organism_name'],
        "is_qn": False,
        "is_compendia": True,
        "samples": [sample.accession_code for sample in job_context["samples"][organism_key]],
        "num_samples": len(job_context["samples"][organism_key]),
        "experiment_accessions": [e.accession_code for e in job_context['experiments']],
        "total_percent_imputed": job_context['total_percent_imputed']
    }
    annotation.save()
    # Create the resulting archive
    final_zip_base = SMASHING_DIR + str(job_context["dataset"].pk) + "_compendia"
    # Copy LICENSE.txt and correct README.md files.
    if job_context["dataset"].quant_sf_only:
        readme_file = "/home/user/README_QUANT.md"
    else:
        readme_file = "/home/user/README_NORMALIZED.md"
    shutil.copy(readme_file, job_context["output_dir"] + "/README.md")
    shutil.copy("/home/user/LICENSE_DATASET.txt", job_context["output_dir"] + "/LICENSE.TXT")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.save()
    # Compendia Result Helpers
    primary_organism = Organism.get_object_for_name(job_context['primary_organism'])
    organisms = [Organism.get_object_for_name(organism) for organism in job_context["all_organisms"]]
    # NOTE(review): the version count filters on quant_sf_only=False while the
    # saved CompendiumResult takes quant_sf_only from the dataset -- confirm
    # this mismatch is intentional.
    compendium_version = CompendiumResult.objects.filter(
        primary_organism=primary_organism,
        quant_sf_only=False
    ).count() + 1
    # Save Compendia Result
    compendium_result = CompendiumResult()
    compendium_result.quant_sf_only = job_context["dataset"].quant_sf_only
    compendium_result.svd_algorithm = job_context['dataset'].svd_algorithm
    compendium_result.compendium_version = compendium_version
    compendium_result.result = result
    compendium_result.primary_organism = primary_organism
    compendium_result.save()
    # create relations to all organisms contained in the compendia
    compendium_result_organism_associations = []
    for compendium_organism in organisms:
        compendium_result_organism_association = CompendiumResultOrganismAssociation()
        compendium_result_organism_association.compendium_result = compendium_result
        compendium_result_organism_association.organism = compendium_organism
        compendium_result_organism_associations.append(
            compendium_result_organism_association)
    CompendiumResultOrganismAssociation.objects.bulk_create(
        compendium_result_organism_associations)
    job_context['compendium_result'] = compendium_result
    logger.info("Compendium created!",
                archive_path=archive_path,
                organism_name=job_context['organism_name'])
    # Upload the result to S3
    timestamp = str(int(time.time()))
    key = job_context['organism_name'] + "_" + str(compendium_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, key)
    job_context['result'] = result
    job_context['computed_files'] = [archive_computed_file]
    job_context['success'] = True
    log_state("end create result object", job_context["job"].id, result_start)
    # TEMPORARY for iterating on compendia more quickly.
    # Reset this so the end_job does clean up the job's non-input-data stuff.
    job_context["work_dir"] = job_context["old_work_dir"]
    return job_context
def create_compendia(job_id: int) -> Dict:
    """Run the full compendia pipeline for ProcessorJob *job_id*.

    Returns the final job context dict produced by utils.run_pipeline.
    """
    pipeline = Pipeline(name=PipelineEnum.CREATE_COMPENDIA.value)
    # Compendia jobs are dispatched directly by Nomad rather than queued via
    # send_job, so their nomad_job_id was never recorded. Backfill it from
    # the NOMAD_JOB_ID env var Nomad sets in every allocation so these jobs
    # can be looked up by Nomad id like every other job.
    from data_refinery_common.models import ProcessorJob
    job = ProcessorJob.objects.filter(id=job_id).first()
    if job is not None and not job.nomad_job_id:
        nomad_job_id = get_env_variable("NOMAD_JOB_ID", "")
        if nomad_job_id:
            job.nomad_job_id = nomad_job_id
            job.save()
    job_context = utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
                                     [utils.start_job,
                                      _prepare_input,
                                      _prepare_frames,
                                      _perform_imputation,
                                      smashing_utils.write_non_data_files,
                                      _create_result_objects,
                                      utils.end_job])
    return job_context
<|code_end|>
workers/data_refinery_workers/processors/create_quantpendia.py
<|code_start|>import os
import logging
import shutil
import time
from typing import Dict, List
import psutil
from django.utils import timezone
from django.conf import settings
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputedFile,
Organism,
Pipeline,
Sample,
CompendiumResult)
from data_refinery_common.utils import get_env_variable, FileUtils
from data_refinery_workers.processors import smashing_utils, utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
logger.setLevel(logging.getLevelName('DEBUG'))
def create_quantpendia(job_id: int) -> Dict:
    """Run the quantpendia pipeline for ProcessorJob *job_id*.

    Returns the final job context dict produced by utils.run_pipeline.
    """
    pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)
    # Compendia jobs are dispatched directly by Nomad rather than queued via
    # send_job, so their nomad_job_id was never recorded. Backfill it from
    # the NOMAD_JOB_ID env var Nomad sets in every allocation.
    from data_refinery_common.models import ProcessorJob
    job = ProcessorJob.objects.filter(id=job_id).first()
    if job is not None and not job.nomad_job_id:
        nomad_job_id = get_env_variable("NOMAD_JOB_ID", "")
        if nomad_job_id:
            job.nomad_job_id = nomad_job_id
            job.save()
    job_context = utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
                                     [utils.start_job,
                                      _make_dirs,
                                      _download_files,
                                      _add_metadata,
                                      _make_archive,
                                      _create_result_objects,
                                      _remove_job_dir,
                                      utils.end_job])
    return job_context
@utils.cache_keys('time_start', 'num_samples', 'time_end', 'formatted_command', work_dir_key='job_dir')
def _download_files(job_context: Dict) -> Dict:
    """Download each sample group's quant.sf files into the output directory.

    Records the start/end timestamps, the total number of files downloaded,
    and the command name on the job context.
    NOTE(review): 'formatted_command' is set to a plain string here, whereas
    the create_compendia pipeline stores a list -- consumers of this key must
    handle both shapes.
    """
    job_context['time_start'] = timezone.now()
    num_samples = 0
    for key, samples in job_context['samples'].items():
        outfile_dir = job_context['output_dir'] + key + '/'
        os.makedirs(outfile_dir, exist_ok=True)
        logger.debug("Downloading quant.sf files for quantpendia.",
                     accession_code=key,
                     job_id=job_context['job_id'],
                     **get_process_stats())
        # download quant.sf files directly into the dataset folder
        num_samples += smashing_utils.sync_quant_files(outfile_dir, samples)
    job_context['num_samples'] = num_samples
    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = "create_quantpendia.py"
    logger.debug("Finished downloading quant.sf files for quantpendia.",
                 job_id=job_context['job_id'],
                 total_downloaded_files=num_samples,
                 **get_process_stats())
    return job_context
@utils.cache_keys('metadata', work_dir_key='job_dir')
def _add_metadata(job_context: Dict) -> Dict:
    """Write the quantpendia's metadata files and README into the output dir."""
    stats = get_process_stats()
    logger.debug("Writing metadata for quantpendia.",
                 job_id=job_context['job_id'],
                 **stats)
    smashing_utils.write_non_data_files(job_context)
    readme_source = "/home/user/README_QUANT.md"
    readme_destination = job_context["output_dir"] + "/README.md"
    shutil.copy(readme_source, readme_destination)
    return job_context
@utils.cache_keys('archive_path', work_dir_key='job_dir')
def _make_archive(job_context: Dict):
    """Zip the output directory; returns the context with 'archive_path' set."""
    organism = _get_organisms(job_context['samples']).first()
    zip_base = job_context['job_dir'] + organism.name + "_rnaseq_compendia"
    logger.debug("Generating archive.",
                 job_id=job_context['job_id'],
                 organism_name=organism.name,
                 **get_process_stats())
    zip_path = shutil.make_archive(zip_base, 'zip', job_context["output_dir"])
    logger.debug("Quantpendia zip file generated.",
                 job_id=job_context['job_id'],
                 organism_name=organism.name,
                 **get_process_stats())
    return {**job_context, 'archive_path': zip_path}
def _create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Records the ComputationalResult, the archive ComputedFile, and a
    CompendiumResult for the quantpendia, then uploads the archive to S3.
    """
    archive_path = job_context['archive_path']
    compendia_organism = _get_organisms(job_context['samples']).first()
    compendia_version = _get_next_compendia_version(compendia_organism)
    result = ComputationalResult()
    # BUGFIX: 'formatted_command' is a plain string for quantpendia jobs (set
    # in _download_files); " ".join() over a string space-separates every
    # character. Only join when it's actually a sequence of words.
    command = job_context['formatted_command']
    if not isinstance(command, str):
        command = " ".join(command)
    result.commands.append(command)
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_QUANTPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = FileUtils.get_filename(archive_path)
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.is_compendia = True
    archive_computed_file.quant_sf_only = True
    archive_computed_file.compendia_organism = compendia_organism
    archive_computed_file.compendia_version = compendia_version
    archive_computed_file.save()
    compendium_result = CompendiumResult()
    compendium_result.quant_sf_only = True
    compendium_result.result = result
    compendium_result.primary_organism = compendia_organism
    compendium_result.compendium_version = compendia_version
    compendium_result.save()
    logger.info("Quantpendia created! Uploading to S3.",
                job_id=job_context['job_id'],
                archive_path=archive_path,
                organism_name=compendia_organism.name,
                **get_process_stats())
    # Upload the result to S3
    timestamp = str(int(time.time()))
    s3_key = compendia_organism.name + "_" + str(compendia_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)
    job_context['result'] = result
    job_context['success'] = True
    return job_context
def _remove_job_dir(job_context: Dict):
    """Delete the job directory once the quantpendia is zipped and uploaded.

    Skipped outside the cloud so local runs and tests keep their files.
    """
    if not settings.RUNNING_IN_CLOUD:
        return job_context
    shutil.rmtree(job_context["job_dir"], ignore_errors=True)
    return job_context
def _make_dirs(job_context: Dict):
    """Create the per-dataset job and output directories, recording their paths."""
    base_dir = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk) + "/"
    output_dir = base_dir + "output/"
    for directory in (base_dir, output_dir):
        os.makedirs(directory, exist_ok=True)
    job_context["job_dir"] = base_dir
    job_context["output_dir"] = output_dir
    return job_context
def get_process_stats():
    """Return current system CPU percentage and this process's RSS in GB."""
    bytes_per_gb = 1024 ** 3
    current_process = psutil.Process(os.getpid())
    return {
        'total_cpu': psutil.cpu_percent(),
        'process_ram': current_process.memory_info().rss / bytes_per_gb,
    }
def _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:
    """Collect the distinct organisms across every group of samples."""
    organism_ids = set()
    for samples in aggregated_samples.values():
        organism_ids.update(samples.values_list('organism__id', flat=True).distinct())
    return Organism.objects.filter(id__in=list(organism_ids))
def _get_next_compendia_version(organism: Organism) -> int:
    """Return the next quant-sf compendia version for *organism* (1 if none exist)."""
    previous = ComputedFile.objects\
        .filter(is_compendia=True, quant_sf_only=True, compendia_organism=organism)\
        .order_by('-compendia_version').first()
    if previous is None:
        # This is the first compendia we are generating for this organism.
        return 1
    return previous.compendia_version + 1
<|code_end|>
workers/data_refinery_workers/processors/smashing_utils.py
<|code_start|># -*- coding: utf-8 -*-
import csv
import logging
import math
import os
import multiprocessing
import shutil
import time
from pathlib import Path
from typing import Dict, List, Tuple
from concurrent.futures import ThreadPoolExecutor
from django.utils import timezone
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
import numpy as np
import pandas as pd
import psutil
import rpy2.robjects as ro
import simplejson as json
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputedFile, Sample
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
MULTIPROCESSING_MAX_THREAD_COUNT = max(1, math.floor(multiprocessing.cpu_count()/2) - 1)
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
BODY_HTML = Path(
'data_refinery_workers/processors/smasher_email.min.html'
).read_text().replace('\n', '')
BODY_ERROR_HTML = Path(
'data_refinery_workers/processors/smasher_email_error.min.html'
).read_text().replace('\n', '')
BYTES_IN_GB = 1024 * 1024 * 1024
QN_CHUNK_SIZE = 10000
logger = get_and_configure_logger(__name__)
### DEBUG ###
logger.setLevel(logging.getLevelName('DEBUG'))
def log_state(message, job_id, start_time=False):
    """Log *message* with CPU/RAM stats at DEBUG level.

    When *start_time* is falsy, returns a timestamp the caller can pass back
    later; when truthy, also logs the elapsed duration and returns None.
    Does nothing (returns None) when DEBUG logging is disabled.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return None
    ram_gb = psutil.Process(os.getpid()).memory_info().rss / BYTES_IN_GB
    logger.debug(message,
                 total_cpu=psutil.cpu_percent(),
                 process_ram=ram_gb,
                 job_id=job_id)
    if not start_time:
        return time.time()
    logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
def prepare_files(job_context: Dict) -> Dict:
    """
    Fetches and prepares the files to smash.

    Populates job_context['input_files'] as {key: [(ComputedFile, Sample), ...]}
    where `key` is either a species name or an experiment accession, then
    creates a fresh work/output directory for the dataset.

    Raises:
        utils.ProcessorJobError: if no sample has a smashable result file.
    """
    start_prepare_files = log_state("start prepare files", job_context["job"].id)
    found_files = False
    job_context['input_files'] = {}
    # `key` can either be the species name or experiment accession.
    for key, samples in job_context["samples"].items():
        smashable_files = []
        seen_files = set()
        for sample in samples:
            smashable_file = sample.get_most_recent_smashable_result_file()
            # De-duplicate: multiple samples can share a result file.
            if smashable_file is not None and smashable_file not in seen_files:
                smashable_files.append((smashable_file, sample))
                seen_files.add(smashable_file)
                found_files = True
        job_context['input_files'][key] = smashable_files
    # NOTE(review): this is the number of groups (keys), not of individual
    # files -- confirm the name matches how callers report it.
    job_context['num_input_files'] = len(job_context['input_files'])
    job_context['group_by_keys'] = list(job_context['input_files'].keys())
    if not found_files:
        raise utils.ProcessorJobError("Couldn't get any files to smash for Smash job!!",
                                      success=False,
                                      dataset_id=job_context['dataset'].id,
                                      num_samples=len(job_context["samples"]))
    dataset_id = str(job_context["dataset"].pk)
    job_context["work_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    # Ensure we have a fresh smash directory
    shutil.rmtree(job_context["work_dir"], ignore_errors=True)
    os.makedirs(job_context["work_dir"])
    job_context["output_dir"] = job_context["work_dir"] + "output/"
    os.makedirs(job_context["output_dir"])
    log_state("end prepare files", job_context["job"].id, start_prepare_files)
    return job_context
def _load_and_sanitize_file(computed_file_path) -> pd.DataFrame:
    """ Read and sanitize a computed file

    Loads a tab-separated expression file (gene identifiers in column 0),
    cleans up probe/version artifacts in the index, and collapses duplicate
    gene rows by taking their mean.

    NOTE(review): `error_bad_lines` is deprecated and was removed in
    pandas 2.0 -- if pandas is upgraded this needs `on_bad_lines='skip'`.
    """
    data = pd.read_csv(computed_file_path,
                       sep='\t',
                       header=0,
                       index_col=0,
                       dtype={0: str, 1: np.float32},
                       error_bad_lines=False)
    # Strip any funky whitespace
    data.columns = data.columns.str.strip()
    data = data.dropna(axis='columns', how='all')
    # Make sure the index type is correct
    data.index = data.index.map(str)
    # Ensure that we don't have any dangling Brainarray-generated probe symbols.
    # BA likes to leave '_at', signifying probe identifiers,
    # on their converted, non-probe identifiers. It makes no sense.
    # So, we chop them off and don't worry about it.
    data.index = data.index.str.replace('_at', '')
    # Remove any lingering Affymetrix control probes ("AFFX-")
    data = data[~data.index.str.contains('AFFX-')]
    # If there are any _versioned_ gene identifiers, remove that
    # version information. We're using the latest brainarray for everything anyway.
    # Jackie says this is okay.
    # She also says that in the future, we may only want to do this
    # for cross-technology smashes.
    # This regex needs to be able to handle EGIDs in the form:
    #       ENSGXXXXYYYZZZZ.6
    # and
    #       fgenesh2_kg.7__3016__AT5G35080.1 (via http://plants.ensembl.org/Arabidopsis_lyrata/ \
    #       Gene/Summary?g=fgenesh2_kg.7__3016__AT5G35080.1;r=7:17949732-17952000;t=fgenesh2_kg. \
    #       7__3016__AT5G35080.1;db=core)
    data.index = data.index.str.replace(r"(\.[^.]*)$", '')
    # Squish duplicated rows together.
    # XXX/TODO: Is mean the appropriate method here?
    #           We can make this an option in future.
    # Discussion here: https://github.com/AlexsLemonade/refinebio/issues/186#issuecomment-395516419
    data = data.groupby(data.index, sort=False).mean()
    return data
def process_frame(work_dir, computed_file, sample_accession_code, aggregate_by) -> pd.DataFrame:
    """ Downloads the computed file from S3 and tries to see if it's smashable.

    Returns a single-column data frame (titled with the sample's
    accession code) if the file can be processed, or None otherwise.
    """
    try:
        # Download the file to a job-specific location so it
        # won't disappear while we're using it.
        computed_file_path = computed_file.get_synced_file_path(
            path="%s%s" % (work_dir, computed_file.filename)
        )

        # Bail appropriately if this isn't a real file.
        if not computed_file_path or not os.path.exists(computed_file_path):
            logger.warning("Smasher received non-existent file path.",
                           computed_file_path=computed_file_path,
                           computed_file_id=computed_file.id)
            return None

        data = _load_and_sanitize_file(computed_file_path)

        if len(data.columns) > 2:
            # Most of the time, >1 is actually bad, but we also need to support
            # two-channel samples. I think ultimately those should be given some kind of
            # special consideration.
            logger.info("Found a frame with more than 2 columns - this shouldn't happen!",
                        computed_file_path=computed_file_path,
                        computed_file_id=computed_file.id)
            return None

        # via https://github.com/AlexsLemonade/refinebio/issues/330:
        #   aggregating by experiment -> return untransformed output from tximport
        #   aggregating by species -> log2(x + 1) tximport output
        # NOTE(review): the meaning of has_been_log2scaled() here can't be
        # confirmed from this file — verify against the ComputedFile model.
        if aggregate_by == 'SPECIES' and computed_file.has_been_log2scaled():
            data = data + 1
            data = np.log2(data)

        # Ideally done in the NO-OPPER, but sanity check here.
        if (not computed_file.has_been_log2scaled()) and (data.max() > 100).any():
            logger.info("Detected non-log2 microarray data.", computed_file_id=computed_file.id)
            data = np.log2(data)

        # Explicitly title this dataframe
        try:
            data.columns = [sample_accession_code]
        except ValueError as e:
            # This sample might have multiple channels, or something else.
            # Don't mess with it.
            logger.warn("Smasher found multi-channel column (probably) - skipping!",
                        exc_info=1,
                        computed_file_path=computed_file_path,)
            return None
        except Exception as e:
            # Okay, somebody probably forgot to create a SampleComputedFileAssociation
            # Don't mess with it.
            logger.warn("Smasher found very bad column title - skipping!",
                        exc_info=1,
                        computed_file_path=computed_file_path)
            return None
    except Exception as e:
        # NOTE(review): if the S3 sync itself raised, computed_file_path is
        # unbound here and this logging call would raise NameError.
        logger.exception("Unable to smash file", file=computed_file_path)
        return None
    # TEMPORARY for iterating on compendia more quickly.
    # finally:
    #     # Delete before archiving the work dir
    #     if computed_file_path and os.path.exists(computed_file_path):
    #         os.remove(computed_file_path)

    return data
def load_first_pass_data_if_cached(work_dir: str):
    """Return the cached first-pass data from ``work_dir``, if present.

    The cache file ('first_pass.csv') holds three rows: the gene
    identifiers, the microarray column names, and the RNA-Seq column
    names.  Returns a dict with keys 'gene_ids', 'microarray_columns',
    and 'rnaseq_columns', or None when nothing was cached.
    """
    cache_path = os.path.join(work_dir, 'first_pass.csv')
    try:
        with open(cache_path, newline='') as cache_file:
            rows = csv.reader(cache_file)
            return {'gene_ids': next(rows),
                    'microarray_columns': next(rows),
                    'rnaseq_columns': next(rows)}
    # A missing file just means the gene ids weren't cached. Any other
    # exception should be handled higher in the stack.
    except FileNotFoundError:
        return None
def cache_first_pass(job_context: Dict,
                     gene_ids: List[str],
                     microarray_columns: List[str],
                     rnaseq_columns: List[str]):
    """Write the first-pass results to a CSV cache in the work dir.

    Stores the gene identifiers, microarray column names, and RNA-Seq
    column names (one row each) so that a retried job can skip the
    expensive first pass over the input files.  Failures are logged and
    swallowed: losing the cache only costs time on a retry.
    """
    try:
        cache_path = os.path.join(job_context['work_dir'], 'first_pass.csv')
        logger.info("Caching gene_ids, microarray_columns, and rnaseq_columns to %s",
                    cache_path,
                    job_id=job_context['job'].id)
        with open(cache_path, 'w', newline='') as cache_file:
            writer = csv.writer(cache_file)
            for row in (gene_ids, microarray_columns, rnaseq_columns):
                writer.writerow(row)
    # Nothing in the above try should raise an exception, but if it
    # does don't waste the work we did in the first pass.
    except Exception:
        logger.exception('Error writing gene identifiers to CSV file.',
                         job_id=job_context['job'].id)
def process_frames_for_key(key: str,
                           input_files: List[Tuple[ComputedFile, Sample]],
                           job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in `input_files`.

    Will add to job_context the keys 'microarray_matrix' and
    'rnaseq_matrix' with pandas dataframes containing all of the
    samples' data. Also adds the key 'unsmashable_files' containing a
    list of paths that were determined to be unsmashable.

    Works in two passes over input_files: the first collects the gene
    identifiers and column names so the matrices can be preallocated;
    the second fills the preallocated matrices column by column.
    """
    start_gene_ids = log_state("Collecting all gene identifiers for key {}".format(key),
                               job_context["job"].id)

    # Build up a list of gene identifiers because these will be the
    # rows of our matrices, and we want to preallocate them so we need
    # to know them all.

    ## We may have built this list in a previous job, check to see if it's cached:
    cached_data = load_first_pass_data_if_cached(job_context['work_dir'])
    first_pass_was_cached = False

    if cached_data:
        logger.info(("The data from the first pass was cached, so we're using "
                     "that and skipping the first pass."),
                    job_id=job_context['job'].id)
        first_pass_was_cached = True
        all_gene_identifiers = cached_data["gene_ids"]
        microarray_columns = cached_data["microarray_columns"]
        rnaseq_columns = cached_data["rnaseq_columns"]
    else:
        # First pass: count identifier occurrences and gather the
        # column (sample accession) names per technology.
        gene_identifier_counts = {}
        microarray_columns = []
        rnaseq_columns = []
        for index, (computed_file, sample) in enumerate(input_files):
            log_state('1st processing frame {}'.format(index), job_context["job"].id)
            frame_data = process_frame(job_context["work_dir"],
                                       computed_file,
                                       sample.accession_code,
                                       job_context['dataset'].aggregate_by)
            if frame_data is None:
                # we were unable to process this sample, so we drop
                logger.warning('Unable to smash file',
                               computed_file=computed_file.id,
                               dataset_id=job_context['dataset'].id,
                               job_id=job_context["job"].id)
                continue

            # Count how many frames are in each tech so we can preallocate
            # the matrices in both directions.
            for gene_id in frame_data.index:
                if gene_id in gene_identifier_counts:
                    gene_identifier_counts[gene_id] += 1
                else:
                    gene_identifier_counts[gene_id] = 1

            # Each dataframe should only have 1 column, but it's
            # returned as a list so use extend.
            if sample.technology == 'MICROARRAY':
                microarray_columns.extend(frame_data.columns)
            elif sample.technology == 'RNA-SEQ':
                rnaseq_columns.extend(frame_data.columns)

        # We only want to use gene identifiers which are present
        # in >50% of the samples. We're doing this because a large
        # number of gene identifiers present in only a modest
        # number of experiments have leaked through. We wouldn't
        # necessarily want to do this if we'd mapped all the data
        # to ENSEMBL identifiers successfully.
        total_samples = len(microarray_columns) + len(rnaseq_columns)
        all_gene_identifiers = [gene_id for gene_id in gene_identifier_counts
                                if gene_identifier_counts[gene_id] > (total_samples * 0.5)]
        all_gene_identifiers.sort()

        # Free the counts dict; it can be very large.
        del gene_identifier_counts

    log_template = ("Collected {0} gene identifiers for {1} across"
                    " {2} micrarry samples and {3} RNA-Seq samples.")
    log_state(log_template.format(len(all_gene_identifiers),
                                  key,
                                  len(microarray_columns),
                                  len(rnaseq_columns)),
              job_context["job"].id,
              start_gene_ids)

    # Temporarily only cache mouse compendia because it may not succeed.
    if not first_pass_was_cached and key == "MUS_MUSCULUS":
        cache_first_pass(job_context, all_gene_identifiers, microarray_columns, rnaseq_columns)

    start_build_matrix = log_state("Beginning to build the full matrices.",
                                   job_context["job"].id)

    # Sort the columns so that the matrices are in predictable orders.
    microarray_columns.sort()
    rnaseq_columns.sort()

    # Preallocate the matrices to be the exact size we will need. This
    # should prevent any operations from happening while we build it
    # up, so the only RAM used will be needed.
    job_context['microarray_matrix'] = pd.DataFrame(data=None,
                                                    index=all_gene_identifiers,
                                                    columns=microarray_columns,
                                                    dtype=np.float32)
    job_context['rnaseq_matrix'] = pd.DataFrame(data=None,
                                                index=all_gene_identifiers,
                                                columns=rnaseq_columns,
                                                dtype=np.float32)

    # Second pass: fill in the preallocated matrices one column per sample.
    for index, (computed_file, sample) in enumerate(input_files):
        log_state('2nd processing frame {}'.format(index), job_context["job"].id)
        frame_data = process_frame(job_context["work_dir"],
                                   computed_file,
                                   sample.accession_code,
                                   job_context['dataset'].aggregate_by)
        if frame_data is None:
            job_context['unsmashable_files'].append(computed_file.filename)
            continue
        else:
            frame_data = frame_data.reindex(all_gene_identifiers)

            # The dataframe for each sample will only have one column
            # whose header will be the accession code.
            column = frame_data.columns[0]
            if sample.technology == 'MICROARRAY':
                job_context['microarray_matrix'][column] = frame_data.values
            elif sample.technology == 'RNA-SEQ':
                job_context['rnaseq_matrix'][column] = frame_data.values

    job_context['num_samples'] = 0
    if job_context['microarray_matrix'] is not None:
        job_context['num_samples'] += len(job_context['microarray_matrix'].columns)
    if job_context['rnaseq_matrix'] is not None:
        job_context['num_samples'] += len(job_context['rnaseq_matrix'].columns)

    log_state("Built full matrices for key {}".format(key),
              job_context["job"].id,
              start_build_matrix)

    return job_context
# Modified from: http://yaoyao.codes/pandas/2018/01/23/pandas-split-a-dataframe-into-chunks
def _index_marks(num_columns, chunk_size):
return range(chunk_size, math.ceil(num_columns / chunk_size) * chunk_size, chunk_size)
def _split_dataframe_columns(dataframe, chunk_size):
    """Split ``dataframe`` column-wise into chunks of at most ``chunk_size``."""
    split_points = _index_marks(dataframe.shape[1], chunk_size)
    return np.split(dataframe, split_points, axis=1)
def _quantile_normalize_matrix(target_vector, original_matrix):
    """Quantile normalize ``original_matrix``'s columns against ``target_vector``.

    Uses Bioconductor's preprocessCore (via rpy2).  Matrices wider than
    QN_CHUNK_SIZE columns are normalized chunk-by-chunk — and mutated in
    place — to bound memory usage.  Returns the normalized DataFrame.
    """
    preprocessCore = importr('preprocessCore')
    as_numeric = rlang("as.numeric")
    data_matrix = rlang('data.matrix')

    # Convert the smashed frames to an R numeric Matrix
    target_vector = as_numeric(target_vector)

    # Do so in chunks if the matrix is too large.
    if original_matrix.shape[1] <= QN_CHUNK_SIZE:
        merged_matrix = data_matrix(original_matrix)
        normalized_matrix = preprocessCore.normalize_quantiles_use_target(x=merged_matrix,
                                                                          target=target_vector,
                                                                          copy=True)
        # And finally convert back to Pandas
        ar = np.array(normalized_matrix)
        new_merged = pd.DataFrame(ar,
                                  columns=original_matrix.columns,
                                  index=original_matrix.index)
    else:
        matrix_chunks = _split_dataframe_columns(original_matrix, QN_CHUNK_SIZE)
        for i, chunk in enumerate(matrix_chunks):
            R_chunk = data_matrix(chunk)
            normalized_chunk = preprocessCore.normalize_quantiles_use_target(
                x=R_chunk,
                target=target_vector,
                copy=True
            )
            ar = np.array(normalized_chunk)
            # Write the normalized values back into this chunk's column
            # range of the original frame (in-place to save memory).
            start_column = i * QN_CHUNK_SIZE
            end_column = (i + 1) * QN_CHUNK_SIZE
            original_matrix.iloc[:, start_column:end_column] = ar

        new_merged = original_matrix

    return new_merged
def _test_qn(merged_matrix):
    """ Selects a list of up to 100 random pairs of columns and performs the KS Test on them.
    Returns a list of tuples with the results of the KS test (statistic, pvalue),
    or None when the matrix has fewer than two columns. """
    # Verify this QN, related:
    # https://github.com/AlexsLemonade/refinebio/issues/599#issuecomment-422132009
    data_matrix = rlang('data.matrix')
    as_numeric = rlang("as.numeric")
    set_seed = rlang("set.seed")
    combn = rlang("combn")
    ncol = rlang("ncol")
    ks_test = rlang("ks.test")
    which = rlang("which")

    merged_R_matrix = data_matrix(merged_matrix)

    # Fixed seed so the randomly chosen column pairs are reproducible.
    set_seed(123)

    n = ncol(merged_R_matrix)[0]
    m = 2

    # Not enough columns to perform KS test - either bad smash or single sample smash.
    if n < m:
        return None

    # This wont work with larger matricies
    # https://github.com/AlexsLemonade/refinebio/issues/1860
    ncolumns = ncol(merged_R_matrix)
    if ncolumns[0] <= 200:
        # Convert to NP, Shuffle, Return to R
        combos = combn(ncolumns, 2)
        ar = np.array(combos)
        np.random.shuffle(np.transpose(ar))
    else:
        # Enumerating every pair is too expensive for wide matrices, so
        # sample 100 random pairs of column indices instead.
        indexes = [*range(ncolumns[0])]
        np.random.shuffle(indexes)
        ar = np.array([*zip(indexes[0:100], indexes[100:200])])

    nr, nc = ar.shape
    combos = ro.r.matrix(ar, nrow=nr, ncol=nc)

    result = []
    # adapted from
    # https://stackoverflow.com/questions/9661469/r-t-test-over-all-columns
    # apply KS test to randomly selected pairs of columns (samples)
    # NOTE(review): the range starts at 1 (R matrices are 1-indexed), so
    # at most 99 pairs are tested — confirm that's intended.
    for i in range(1, min(ncol(combos)[0], 100)):
        value1 = combos.rx(1, i)[0]
        value2 = combos.rx(2, i)[0]

        test_a = merged_R_matrix.rx(True, value1)
        test_b = merged_R_matrix.rx(True, value2)

        # RNA-seq has a lot of zeroes in it, which
        # breaks the ks_test. Therefore we want to
        # filter them out. To do this we drop the
        # lowest half of the values. If there's
        # still zeroes in there, then that's
        # probably too many zeroes so it's okay to
        # fail.
        median_a = np.median(test_a)
        median_b = np.median(test_b)

        # `which` returns indices which are
        # 1-indexed. Python accesses lists with
        # zero-indexes, even if that list is
        # actually an R vector. Therefore subtract
        # 1 to account for the difference.
        test_a = [test_a[i-1] for i in which(test_a > median_a)]
        test_b = [test_b[i-1] for i in which(test_b > median_b)]

        # The python list comprehension gives us a
        # python list, but ks_test wants an R
        # vector so let's go back.
        test_a = as_numeric(test_a)
        test_b = as_numeric(test_b)

        ks_res = ks_test(test_a, test_b)
        statistic = ks_res.rx('statistic')[0][0]
        pvalue = ks_res.rx('p.value')[0][0]

        result.append((statistic, pvalue))

    return result
def quantile_normalize(job_context: Dict, ks_check=True, ks_stat=0.001) -> Dict:
    """
    Apply quantile normalization.

    Pops 'merged_no_qn' from job_context, normalizes it against the
    organism's QN target, and stores the result under 'merged_qn'.
    When the KS sanity check runs, 'ks_statistic'/'ks_pvalue' (and
    possibly 'ks_warning') are also added.

    Raises utils.ProcessorJobError when the organism has no QN target.
    """
    # Prepare our QN target file
    organism = job_context['organism']

    if not organism.qn_target:
        failure_reason = "Could not find QN target for Organism: " + str(organism)
        job_context['dataset'].success = False
        job_context['dataset'].failure_reason = failure_reason
        job_context['dataset'].save()
        raise utils.ProcessorJobError(failure_reason,
                                      success=False,
                                      organism=organism,
                                      dataset_id=job_context['dataset'].id)

    qn_target_path = organism.qn_target.computedfile_set.latest().sync_from_s3()
    qn_target_frame = pd.read_csv(qn_target_path, sep='\t', header=None,
                                  index_col=None, error_bad_lines=False)

    # Prepare our RPy2 bridge
    pandas2ri.activate()

    # Remove un-quantiled normalized matrix from job_context
    # because we no longer need it.
    merged_no_qn = job_context.pop('merged_no_qn')

    # Perform the Actual QN
    new_merged = _quantile_normalize_matrix(qn_target_frame[0], merged_no_qn)

    # And add the quantile normalized matrix to job_context.
    job_context['merged_qn'] = new_merged

    # For now, don't test the QN for mouse/human. This never fails on
    # smasher jobs and is OOM-killing our very large compendia
    # jobs. Let's run this manually after we have a compendia job
    # actually finish.
    if organism.name in ["MUS_MUSCULUS", "HOMO_SAPIENS"]:
        return job_context

    ks_res = _test_qn(new_merged)
    if ks_res:
        for (statistic, pvalue) in ks_res:
            job_context['ks_statistic'] = statistic
            job_context['ks_pvalue'] = pvalue

            # We're unsure of how stringent to be about
            # the pvalue just yet, so we're extra lax
            # rather than failing tons of tests. This may need tuning.
            if ks_check and (statistic > ks_stat or pvalue < 0.8):
                job_context['ks_warning'] = ("Failed Kolmogorov Smirnov test! Stat: " +
                                             str(statistic) + ", PVal: " + str(pvalue))
    else:
        logger.warning("Not enough columns to perform KS test - either bad smash or single sample smash.",
                       dataset_id=job_context['dataset'].id)

    return job_context
def compile_metadata(job_context: Dict) -> Dict:
    """Compile metadata about the job.

    Returns a new dict containing the metadata, not the job_context.
    """
    dataset = job_context['dataset']
    metadata = {
        'num_samples': job_context['num_samples'],
        'num_experiments': job_context['experiments'].count(),
        'quant_sf_only': dataset.quant_sf_only,
    }

    if not dataset.quant_sf_only:
        metadata['aggregate_by'] = dataset.aggregate_by
        metadata['scale_by'] = dataset.scale_by
        # https://github.com/AlexsLemonade/refinebio/pull/421#discussion_r203799646
        # TODO: do something with these.
        # metadata['non_aggregated_files'] = job_context["unsmashable_files"]
        metadata['ks_statistic'] = job_context.get("ks_statistic", None)
        metadata['ks_pvalue'] = job_context.get("ks_pvalue", None)
        metadata['ks_warning'] = job_context.get("ks_warning", None)
        metadata['quantile_normalized'] = dataset.quantile_normalize

    metadata['samples'] = {
        sample.accession_code: sample.to_metadata_dict()
        for sample in dataset.get_samples()
    }
    metadata['experiments'] = {
        experiment.accession_code: experiment.to_metadata_dict()
        for experiment in dataset.get_experiments()
    }

    return metadata
def write_non_data_files(job_context: Dict) -> Dict:
    """Write the files that are not the actual data of the dataset.

    These include the LICENSE.txt and README.md files as well as the
    metadata (per-group TSVs and aggregated_metadata.json).

    Adds the key `metadata` to job_context and populates it with all
    the metadata that needs to be written.
    """
    output_dir = job_context["output_dir"]
    job_context['metadata'] = compile_metadata(job_context)

    shutil.copy("README_DATASET.md", output_dir + "README.md")
    shutil.copy("LICENSE_DATASET.txt", output_dir + "LICENSE.TXT")

    try:
        # Write samples metadata to TSV first...
        job_context['metadata_tsv_paths'] = write_tsv_json(job_context)
        # ...then dump everything to JSON.
        job_context['metadata']['created_at'] = timezone.now().strftime('%Y-%m-%dT%H:%M:%S')
        json_path = output_dir + 'aggregated_metadata.json'
        with open(json_path, 'w', encoding='utf-8') as metadata_file:
            json.dump(job_context['metadata'], metadata_file, indent=4, sort_keys=True)
    except Exception:
        logger.exception("Failed to write metadata TSV!",
                         job_id=job_context['job'].id)
        job_context['metadata_tsv_paths'] = None

    return job_context
def get_experiment_accession(sample_accession_code, dataset_data):
    """Return the accession of the experiment that contains the given sample."""
    return next(
        (accession
         for accession, sample_codes in dataset_data.items()
         if sample_accession_code in sample_codes),
        # Should never happen, because the sample is by definition in the dataset.
        "",
    )
def _add_annotation_column(annotation_columns, column_name):
"""Add annotation column names in place.
Any column_name that starts with "refinebio_" will be skipped.
"""
if not column_name.startswith("refinebio_"):
annotation_columns.add(column_name)
def _add_annotation_value(row_data, col_name, col_value, sample_accession_code):
"""Adds a new `col_name` key whose value is `col_value` to row_data.
If col_name already exists in row_data with different value, print
out a warning message.
"""
# Generate a warning message if annotation field name starts with
# "refinebio_". This should rarely (if ever) happen.
if col_name.startswith("refinebio_"):
logger.warning(
"Annotation value skipped",
annotation_field=col_name,
annotation_value=col_value,
sample_accession_code=sample_accession_code
)
elif col_name not in row_data:
row_data[col_name] = col_value
# Generate a warning message in case of conflicts of annotation values.
# (Requested by Dr. Jackie Taroni)
elif row_data[col_name] != col_value:
logger.warning(
"Conflict of values found in column %s: %s vs. %s" % (
col_name, row_data[col_name], col_value),
sample_accession_code=sample_accession_code
)
def get_tsv_row_data(sample_metadata, dataset_data):
    """Returns field values based on input sample_metadata.

    Some annotation fields are treated specially because they are more
    important. See `get_tsv_columns` function above for details.
    Returns a dict mapping TSV column names to values for one row.
    """
    sample_accession_code = sample_metadata.get('refinebio_accession_code', '')
    row_data = dict()
    for meta_key, meta_value in sample_metadata.items():
        # If the field is a refinebio-specific field, simply copy it.
        if meta_key != 'refinebio_annotations':
            row_data[meta_key] = meta_value
            continue

        # Decompose sample_metadata["refinebio_annotations"], which is
        # an array of annotations.
        for annotation in meta_value:
            for annotation_key, annotation_value in annotation.items():
                # "characteristic" in ArrayExpress annotation
                if sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                   and annotation_key == "characteristic":
                    for pair_dict in annotation_value:
                        if 'category' in pair_dict and 'value' in pair_dict:
                            col_name, col_value = pair_dict['category'], pair_dict['value']
                            _add_annotation_value(row_data, col_name, col_value,
                                                  sample_accession_code)
                # "variable" in ArrayExpress annotation
                elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                     and annotation_key == "variable":
                    for pair_dict in annotation_value:
                        if 'name' in pair_dict and 'value' in pair_dict:
                            col_name, col_value = pair_dict['name'], pair_dict['value']
                            _add_annotation_value(row_data, col_name, col_value,
                                                  sample_accession_code)
                # Skip "source" field ArrayExpress sample's annotation
                elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                     and annotation_key == "source":
                    continue
                # "characteristics_ch1" in GEO annotation
                elif sample_metadata.get('refinebio_source_database', '') == "GEO" \
                     and annotation_key == "characteristics_ch1":  # array of strings
                    for pair_str in annotation_value:
                        if ':' in pair_str:
                            col_name, col_value = pair_str.split(':', 1)
                            col_value = col_value.strip()
                            _add_annotation_value(row_data, col_name, col_value,
                                                  sample_accession_code)
                # If annotation_value includes only a 'name' key, extract its value directly:
                elif isinstance(annotation_value, dict) \
                     and len(annotation_value) == 1 and 'name' in annotation_value:
                    _add_annotation_value(row_data, annotation_key, annotation_value['name'],
                                          sample_accession_code)
                # If annotation_value is a single-element array, extract the element directly:
                elif isinstance(annotation_value, list) and len(annotation_value) == 1:
                    _add_annotation_value(row_data, annotation_key, annotation_value[0],
                                          sample_accession_code)
                # Otherwise save all annotation fields in separate columns
                else:
                    _add_annotation_value(row_data, annotation_key, annotation_value,
                                          sample_accession_code)

    row_data["experiment_accession"] = get_experiment_accession(sample_accession_code,
                                                                dataset_data)

    return row_data
def get_tsv_columns(job_context, samples_metadata):
    """Returns an array of strings that will be written as a TSV file's
    header. The columns are based on fields found in samples_metadata.

    Some nested annotation fields are taken out as separate columns
    because they are more important than the others.
    """
    tsv_start = log_state("start get tsv columns", job_context["job"].id)

    refinebio_columns = set()
    annotation_columns = set()
    for sample_metadata in samples_metadata.values():
        for meta_key, meta_value in sample_metadata.items():
            if meta_key != 'refinebio_annotations':
                refinebio_columns.add(meta_key)
                continue

            # Decompose sample_metadata["annotations"], which is an array of annotations!
            for annotation in meta_value:
                for annotation_key, annotation_value in annotation.items():
                    # For ArrayExpress samples, take out the fields
                    # nested in "characteristic" as separate columns.
                    if sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                       and annotation_key == "characteristic":
                        for pair_dict in annotation_value:
                            if 'category' in pair_dict and 'value' in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict['category'])
                    # For ArrayExpress samples, also take out the fields
                    # nested in "variable" as separate columns.
                    elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                         and annotation_key == "variable":
                        for pair_dict in annotation_value:
                            if 'name' in pair_dict and 'value' in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict['name'])
                    # For ArrayExpress samples, skip "source" field
                    elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                         and annotation_key == "source":
                        continue
                    # For GEO samples, take out the fields nested in
                    # "characteristics_ch1" as separate columns.
                    elif sample_metadata.get('refinebio_source_database', '') == "GEO" \
                         and annotation_key == "characteristics_ch1":  # array of strings
                        for pair_str in annotation_value:
                            if ':' in pair_str:
                                tokens = pair_str.split(':', 1)
                                _add_annotation_column(annotation_columns, tokens[0])
                    # Saves all other annotation fields in separate columns
                    else:
                        _add_annotation_column(annotation_columns, annotation_key)

    # Return sorted columns, in which "refinebio_accession_code" and "experiment_accession" are
    # always first, followed by the other refinebio columns (in alphabetic order), and
    # annotation columns (in alphabetic order) at the end.
    refinebio_columns.discard('refinebio_accession_code')

    log_state("end get tsv columns", job_context["job"].id, tsv_start)

    return ['refinebio_accession_code', 'experiment_accession'] + sorted(refinebio_columns) \
        + sorted(annotation_columns)
def write_tsv_json(job_context):
    """Writes tsv files on disk.

    If the dataset is aggregated by species, also write species-level
    JSON file.  Returns the list of TSV paths written (one per
    experiment, per species, or a single ALL file depending on the
    dataset's aggregation mode).
    """
    # Avoid pulling this out of job_context repeatedly.
    metadata = job_context['metadata']

    # Uniform TSV header per dataset
    columns = get_tsv_columns(job_context, metadata['samples'])

    # Per-Experiment Metadata
    if job_context["dataset"].aggregate_by == "EXPERIMENT":
        tsv_paths = []
        for experiment_title, experiment_data in metadata['experiments'].items():
            experiment_dir = job_context["output_dir"] + experiment_title + '/'
            # NOTE(review): paths are encoded to ASCII bytes (dropping
            # non-ASCII characters) — presumably to avoid filesystem
            # issues with accession titles; confirm downstream consumers
            # expect bytes paths here.
            experiment_dir = experiment_dir.encode('ascii', 'ignore')
            os.makedirs(experiment_dir, exist_ok=True)
            tsv_path = experiment_dir.decode("utf-8") + 'metadata_' + experiment_title + '.tsv'
            tsv_path = tsv_path.encode('ascii', 'ignore')
            tsv_paths.append(tsv_path)
            with open(tsv_path, 'w', encoding='utf-8') as tsv_file:
                dw = csv.DictWriter(tsv_file, columns, delimiter='\t', extrasaction='ignore')
                dw.writeheader()
                for sample_accession_code, sample_metadata in metadata['samples'].items():
                    if sample_accession_code in experiment_data['sample_accession_codes']:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
        return tsv_paths
    # Per-Species Metadata
    elif job_context["dataset"].aggregate_by == "SPECIES":
        tsv_paths = []
        for species in job_context['group_by_keys']:
            species_dir = job_context["output_dir"] + species + '/'
            os.makedirs(species_dir, exist_ok=True)
            samples_in_species = []
            tsv_path = species_dir + "metadata_" + species + '.tsv'
            tsv_paths.append(tsv_path)
            with open(tsv_path, 'w', encoding='utf-8') as tsv_file:
                # See http://www.lucainvernizzi.net/blog/2015/08/03/8x-speed-up-for-python-s-csv-dictwriter/
                # about extrasaction.
                dw = csv.DictWriter(tsv_file, columns, delimiter='\t', extrasaction='ignore')
                dw.writeheader()
                i = 0
                for sample_metadata in metadata['samples'].values():
                    if sample_metadata.get('refinebio_organism', '') == species:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
                        samples_in_species.append(sample_metadata)

                    # Progress log every 1000 samples scanned.
                    i = i + 1
                    if i % 1000 == 0:
                        progress_template = ('Done with {0} out of {1} lines of metadata '
                                             'for species {2}')
                        log_state(progress_template.format(i, len(metadata['samples']), species),
                                  job_context['job'].id)

            # Writes a json file for current species:
            if len(samples_in_species):
                species_metadata = {
                    'species': species,
                    'samples': samples_in_species
                }
                json_path = species_dir + "metadata_" + species + '.json'
                with open(json_path, 'w', encoding='utf-8') as json_file:
                    json.dump(species_metadata, json_file, indent=4, sort_keys=True)
        return tsv_paths
    # All Metadata
    else:
        all_dir = job_context["output_dir"] + "ALL/"
        os.makedirs(all_dir, exist_ok=True)
        tsv_path = all_dir + 'metadata_ALL.tsv'
        with open(tsv_path, 'w', encoding='utf-8') as tsv_file:
            dw = csv.DictWriter(tsv_file, columns, delimiter='\t', extrasaction='ignore')
            dw.writeheader()
            for sample_metadata in metadata['samples'].values():
                row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                dw.writerow(row_data)
        return [tsv_path]
def download_computed_file(download_tuple: Tuple[ComputedFile, str]):
    """Download the latest computed file from S3.

    `download_tuple` is a (computed_file, output_file_path) pair; the
    file is synced to output_file_path.  This is used to parallelize
    downloading quant.sf files.
    """
    (latest_computed_file, output_file_path) = download_tuple
    try:
        latest_computed_file.get_synced_file_path(path=output_file_path)
    # Let's not fail if there's an error syncing one of the quant.sf files.
    # Catch Exception rather than using a bare `except:` so that
    # KeyboardInterrupt / SystemExit can still propagate.
    except Exception:
        logger.exception('Failed to sync computed file', computed_file_id=latest_computed_file.pk)
def sync_quant_files(output_path, samples: List[Sample]):
    """Copy each sample's latest quant.sf file into ``output_path``.

    Samples without a quant.sf file are skipped.  Returns the total
    number of samples whose files were included.
    """
    num_samples = 0
    page_size = 100
    # Split the samples into pages and download each page in parallel.
    with ThreadPoolExecutor(max_workers=MULTIPROCESSING_MAX_THREAD_COUNT) as executor:
        # For each sample we need its latest quant.sf file. We don't want
        # to query the db for all of them at once, so we do it in groups
        # of `page_size` and then download all of the computed_files in parallel.
        for page_start in range(0, len(samples), page_size):
            # BUGFIX: this slice used to be samples[i * page_size:i + page_size],
            # which produced an empty page for every iteration after the
            # first because the loop variable already advances by page_size.
            sample_page = samples[page_start:page_start + page_size]

            sample_and_computed_files = []
            for sample in sample_page:
                latest_computed_file = sample.get_most_recent_quant_sf_file()
                if not latest_computed_file:
                    continue
                output_file_path = output_path + sample.accession_code + "_quant.sf"
                sample_and_computed_files.append((latest_computed_file, output_file_path))

            # Download this set of files; this will take a few seconds,
            # which should also help the db recover.
            executor.map(download_computed_file, sample_and_computed_files)
            num_samples += len(sample_and_computed_files)

    return num_samples
<|code_end|>
| workers/data_refinery_workers/processors/create_compendia.py
<|code_start|>import logging
import os
import shutil
import time
import itertools
from typing import Dict
from django.utils import timezone
from django.db.models import Q, Count
from fancyimpute import IterativeSVD
import numpy as np
import pandas as pd
import psutil
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
CompendiumResultOrganismAssociation,
ComputedFile,
Organism,
Pipeline,
Sample)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
# Silence pandas' SettingWithCopyWarning; frames are assigned into deliberately.
pd.set_option('mode.chained_assignment', None)

S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
BYTES_IN_GB = 1024 * 1024 * 1024
# Shared scratch directory reused across compendia jobs (see _prepare_input).
SMASHING_DIR = "/home/user/data_store/smashed/"
logger = get_and_configure_logger(__name__)
### DEBUG ###
# Force DEBUG level so log_state() emits CPU/RAM usage during compendia runs.
logger.setLevel(logging.getLevelName('DEBUG'))
def log_state(message, job_id, start_time=False):
    """Log a debug message along with current CPU and RAM usage.

    When `start_time` is falsy, returns a timestamp that can later be
    passed back in to also log the elapsed duration.  Nothing is logged
    (and no timestamp is returned) unless DEBUG logging is enabled.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return None

    ram_in_GB = psutil.Process(os.getpid()).memory_info().rss / BYTES_IN_GB
    logger.debug(message,
                 total_cpu=psutil.cpu_percent(),
                 process_ram=ram_in_GB,
                 job_id=job_id)

    if start_time:
        logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
        return None

    return time.time()
def _prepare_input(job_context: Dict) -> Dict:
    """Flatten samples under a single primary organism and prepare files.

    Compendia jobs can receive samples keyed by several organisms; the
    organism with the most samples becomes "primary" and all samples are
    re-keyed under it before smashing_utils.prepare_files runs.
    """
    start_time = log_state("prepare input", job_context["job"].id)

    # The primary organism is the one with the most samples.
    job_context["primary_organism"] = max(job_context["samples"],
                                          key=lambda organism: len(job_context["samples"][organism]))
    job_context["all_organisms"] = job_context["samples"].keys()
    all_samples = list(itertools.chain(*job_context["samples"].values()))
    job_context["samples"] = {job_context["primary_organism"]: all_samples}

    # We'll store here all sample accession codes that didn't make it into the compendia
    # with the reason why not.
    job_context['filtered_samples'] = {}

    job_context = smashing_utils.prepare_files(job_context)

    # Compendia jobs only run for one organism, so we know the only
    # key will be the organism name, unless of course we've already failed.
    if job_context['job'].success is not False:
        job_context["organism_name"] = job_context['group_by_keys'][0]

        # TEMPORARY for iterating on compendia more quickly. Rather
        # than downloading the data from S3 each run we're just gonna
        # use the same directory every job.
        job_context["old_work_dir"] = job_context["work_dir"]
        job_context["work_dir"] = SMASHING_DIR + job_context["organism_name"] + "/"
        if not os.path.exists(job_context["work_dir"]):
            os.makedirs(job_context["work_dir"])

    log_state("prepare input done", job_context["job"].id, start_time)
    return job_context
def _prepare_frames(job_context: Dict) -> Dict:
    """Download and assemble the per-key sample frames for the compendium.

    Iterates over job_context['input_files'] (popped so it isn't kept in
    memory) and delegates to smashing_utils.process_frames_for_key, then
    marks the dataset successful.

    Raises:
        utils.ProcessorJobError: when any key fails to process; chained to
        the original exception so the real traceback is preserved.
    """
    start_prepare_frames = log_state("start _prepare_frames", job_context["job"].id)

    job_context['unsmashable_files'] = []
    job_context['num_samples'] = 0

    # Smash all of the sample sets
    logger.debug("About to smash!",
                 dataset_count=len(job_context['dataset'].data),
                 job_id=job_context['job'].id)

    try:
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop('input_files').items():
            job_context = smashing_utils.process_frames_for_key(key, input_files, job_context)
        # if len(job_context['all_frames']) < 1:
        # TODO: Enable this check?
    except Exception as e:
        # Chain the original exception (`from e`) so the underlying cause
        # shows up in the failure traceback instead of being swallowed.
        raise utils.ProcessorJobError("Could not prepare frames for compendia.",
                                      success=False,
                                      dataset_id=job_context['dataset'].id,
                                      processor_job_id=job_context['job_id'],
                                      num_input_files=job_context['num_input_files']) from e

    job_context['dataset'].success = True
    job_context['dataset'].save()

    log_state("end _prepare_frames", job_context["job"].id, start_prepare_frames)
    return job_context
def _perform_imputation(job_context: Dict) -> Dict:
    """
    Take the inputs and perform the primary imputation.

    Consumes job_context['microarray_matrix'] and job_context['rnaseq_matrix']
    and leaves the quantile-normalized, imputed compendium on the context via
    smashing_utils.quantile_normalize. Samples dropped for missingness are
    recorded in job_context['filtered_samples'].

    Via https://github.com/AlexsLemonade/refinebio/issues/508#issuecomment-435879283:
    - Combine all microarray samples with a full join to form a
      microarray_expression_matrix (this may end up being a DataFrame).
    - Combine all RNA-seq samples (lengthScaledTPM) with a full outer join
      to form a rnaseq_expression_matrix.
    - Calculate the sum of the lengthScaledTPM values for each row (gene) of
      the rnaseq_expression_matrix (rnaseq_row_sums).
    - Calculate the 10th percentile of rnaseq_row_sums
    - Drop all rows in rnaseq_expression_matrix with a row sum < 10th percentile of
      rnaseq_row_sums; this is now filtered_rnaseq_matrix
    - log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
    - Set all zero values in log2_rnaseq_matrix to NA, but make sure to keep track of
      where these zeroes are
    - Perform a full outer join of microarray_expression_matrix and
      log2_rnaseq_matrix; combined_matrix
    - Remove genes (rows) with >30% missing values in combined_matrix
    - Remove samples (columns) with >50% missing values in combined_matrix
    - "Reset" zero values that were set to NA in RNA-seq samples (i.e., make these zero
      again) in combined_matrix
    - Transpose combined_matrix; transposed_matrix
    - Perform imputation of missing values with IterativeSVD (rank=10) on
      the transposed_matrix; imputed_matrix
      -- with specified svd algorithm or skip
    - Untranspose imputed_matrix (genes are now rows, samples are now columns)
    - Quantile normalize imputed_matrix where genes are rows and samples are columns
    """
    imputation_start = log_state("start perform imputation", job_context["job"].id)
    job_context['time_start'] = timezone.now()

    rnaseq_row_sums_start = log_state("start rnaseq row sums", job_context["job"].id)

    # We potentially can have a microarray-only compendia but not a RNASeq-only compendia
    log2_rnaseq_matrix = None
    if job_context['rnaseq_matrix'] is not None:
        # Drop any genes that are entirely NULL in the RNA-Seq matrix
        # NOTE(review): axis='columns' drops all-null *columns*, not rows —
        # confirm whether the comment or the axis is what's intended here.
        job_context['rnaseq_matrix'] = job_context['rnaseq_matrix'].dropna(axis='columns',
                                                                           how='all')

        # Calculate the sum of the lengthScaledTPM values for each row
        # (gene) of the rnaseq_matrix (rnaseq_row_sums)
        rnaseq_row_sums = np.sum(job_context['rnaseq_matrix'], axis=1)
        log_state("end rnaseq row sums", job_context["job"].id, rnaseq_row_sums_start)
        rnaseq_decile_start = log_state("start rnaseq decile", job_context["job"].id)

        # Calculate the 10th percentile of rnaseq_row_sums
        rnaseq_tenth_percentile = np.percentile(rnaseq_row_sums, 10)
        log_state("end rnaseq decile", job_context["job"].id, rnaseq_decile_start)

        drop_start = log_state("drop all rows", job_context["job"].id)
        # Drop all rows in rnaseq_matrix with a row sum < 10th
        # percentile of rnaseq_row_sums; this is now
        # filtered_rnaseq_matrix
        # TODO: This is probably a better way to do this with `np.where`
        rows_to_filter = []
        for (x, sum_val) in rnaseq_row_sums.items():
            if sum_val < rnaseq_tenth_percentile:
                rows_to_filter.append(x)

        del rnaseq_row_sums

        log_state("actually calling drop()", job_context["job"].id)

        filtered_rnaseq_matrix = job_context.pop('rnaseq_matrix').drop(rows_to_filter)

        del rows_to_filter
        log_state("end drop all rows", job_context["job"].id, drop_start)

        log2_start = log_state("start log2", job_context["job"].id)
        # log2(x + 1) transform filtered_rnaseq_matrix; this is now log2_rnaseq_matrix
        filtered_rnaseq_matrix_plus_one = filtered_rnaseq_matrix + 1
        log2_rnaseq_matrix = np.log2(filtered_rnaseq_matrix_plus_one)
        del filtered_rnaseq_matrix_plus_one
        del filtered_rnaseq_matrix
        log_state("end log2", job_context["job"].id, log2_start)

        cache_start = log_state("start caching zeroes", job_context["job"].id)
        # Cache our RNA-Seq zero values
        # NOTE(review): cached_zeroes is only defined on this branch; the
        # loop further below that reads it would NameError on a
        # microarray-only run — confirm against the full file.
        cached_zeroes = {}
        for column in log2_rnaseq_matrix.columns:
            cached_zeroes[column] = log2_rnaseq_matrix.index[np.where(log2_rnaseq_matrix[column] == 0)]

        # Set all zero values in log2_rnaseq_matrix to NA, but make sure
        # to keep track of where these zeroes are
        log2_rnaseq_matrix[log2_rnaseq_matrix == 0] = np.nan
        log_state("end caching zeroes", job_context["job"].id, cache_start)

    outer_merge_start = log_state("start outer merge", job_context["job"].id)

    # Perform a full outer join of microarray_matrix and
    # log2_rnaseq_matrix; combined_matrix
    if log2_rnaseq_matrix is not None:
        combined_matrix = job_context.pop('microarray_matrix').merge(log2_rnaseq_matrix,
                                                                     how='outer',
                                                                     left_index=True,
                                                                     right_index=True)
    else:
        logger.info("Building compendia with only microarray data.", job_id=job_context["job"].id)
        combined_matrix = job_context.pop('microarray_matrix')

    log_state("ran outer merge, now deleteing log2_rnaseq_matrix", job_context["job"].id)

    # Safe even on the microarray-only path: the name is bound to None.
    del log2_rnaseq_matrix
    log_state("end outer merge", job_context["job"].id, outer_merge_start)
    drop_na_genes_start = log_state("start drop NA genes", job_context["job"].id)

    # # Visualize Prefiltered
    # output_path = job_context['output_dir'] + "pre_filtered_" + str(time.time()) + ".png"
    # visualized_prefilter = visualize.visualize(combined_matrix.copy(), output_path)

    # Remove genes (rows) with <=70% present values in combined_matrix
    thresh = combined_matrix.shape[1] * .7  # (Rows, Columns)
    # Everything below `thresh` is dropped
    row_filtered_matrix = combined_matrix.dropna(axis='index', thresh=thresh)
    del combined_matrix
    del thresh

    log_state("end drop NA genes", job_context["job"].id, drop_na_genes_start)
    drop_na_samples_start = log_state("start drop NA samples", job_context["job"].id)

    # # Visualize Row Filtered
    # output_path = job_context['output_dir'] + "row_filtered_" + str(time.time()) + ".png"
    # visualized_rowfilter = visualize.visualize(row_filtered_matrix.copy(), output_path)

    # Remove samples (columns) with <50% present values in combined_matrix
    # XXX: Find better test data for this!
    col_thresh = row_filtered_matrix.shape[0] * .5
    row_col_filtered_matrix_samples = row_filtered_matrix.dropna(axis='columns',
                                                                 thresh=col_thresh)
    # Keep the surviving index/columns around so the imputed numpy array
    # can be rebuilt into a labelled DataFrame later.
    row_col_filtered_matrix_samples_index = row_col_filtered_matrix_samples.index
    row_col_filtered_matrix_samples_columns = row_col_filtered_matrix_samples.columns

    log_state("end drop NA genes", job_context["job"].id, drop_na_samples_start)
    replace_zeroes_start = log_state("start replace zeroes", job_context["job"].id)

    # Record every sample that was dropped by the column filter, with a reason.
    for sample_accession_code in row_filtered_matrix.columns:
        if sample_accession_code not in row_col_filtered_matrix_samples_columns:
            sample = Sample.objects.get(accession_code=sample_accession_code)
            sample_metadata = sample.to_metadata_dict()
            job_context['filtered_samples'][sample_accession_code] = {
                **sample_metadata,
                'reason': 'Sample was dropped because it had less than 50% present values.',
                'experiment_accession_code': smashing_utils.get_experiment_accession(sample.accession_code, job_context['dataset'].data)
            }

    del row_filtered_matrix

    # # Visualize Row and Column Filtered
    # output_path = job_context['output_dir'] + "row_col_filtered_" + str(time.time()) + ".png"
    # visualized_rowcolfilter = visualize.visualize(row_col_filtered_matrix_samples.copy(),
    #                                               output_path)

    # "Reset" zero values that were set to NA in RNA-seq samples
    # (i.e., make these zero again) in combined_matrix
    for column in cached_zeroes.keys():
        zeroes = cached_zeroes[column]

        # Skip purged columns
        if column not in row_col_filtered_matrix_samples:
            continue

        # Place the zero
        try:
            # This generates a warning, so use loc[] instead
            # row_col_filtered_matrix_samples[column].replace(zeroes, 0.0, inplace=True)
            zeroes_list = zeroes.tolist()
            new_index_list = row_col_filtered_matrix_samples_index.tolist()
            # Only restore zeroes for genes that survived the row filter.
            new_zeroes = list(set(new_index_list) & set(zeroes_list))
            row_col_filtered_matrix_samples[column].loc[new_zeroes] = 0.0
        except Exception as e:
            logger.warn("Error when replacing zero")
            continue

    log_state("end replace zeroes", job_context["job"].id, replace_zeroes_start)
    transposed_zeroes_start = log_state("start replacing transposed zeroes", job_context["job"].id)

    # Label our new replaced data
    combined_matrix_zero = row_col_filtered_matrix_samples
    del row_col_filtered_matrix_samples

    transposed_matrix_with_zeros = combined_matrix_zero.T
    del combined_matrix_zero

    # Remove -inf and inf
    # This should never happen, but make sure it doesn't!
    transposed_matrix = transposed_matrix_with_zeros.replace([np.inf, -np.inf], np.nan)
    del transposed_matrix_with_zeros
    log_state("end replacing transposed zeroes", job_context["job"].id, transposed_zeroes_start)

    # Store the absolute/percentages of imputed values
    matrix_sum = transposed_matrix.isnull().sum()
    percent = (matrix_sum / transposed_matrix.isnull().count()).sort_values(ascending=False)
    total_percent_imputed = sum(percent) / len(transposed_matrix.count())
    job_context['total_percent_imputed'] = total_percent_imputed
    logger.info("Total percentage of data to impute!", total_percent_imputed=total_percent_imputed)

    # Perform imputation of missing values with IterativeSVD (rank=10) on the
    # transposed_matrix; imputed_matrix
    svd_algorithm = job_context['dataset'].svd_algorithm
    if svd_algorithm != 'NONE':
        svd_start = log_state("start SVD", job_context["job"].id)

        logger.info("IterativeSVD algorithm: %s" % svd_algorithm)
        svd_algorithm = str.lower(svd_algorithm)
        # NOTE(review): fit_transform returns a numpy array here, while the
        # 'NONE' branch keeps a DataFrame — both feed the same .T below.
        imputed_matrix = IterativeSVD(
            rank=10,
            svd_algorithm=svd_algorithm
        ).fit_transform(transposed_matrix)

        svd_start = log_state("end SVD", job_context["job"].id, svd_start)
    else:
        imputed_matrix = transposed_matrix
        logger.info("Skipping IterativeSVD")
    del transposed_matrix

    untranspose_start = log_state("start untranspose", job_context["job"].id)

    # Untranspose imputed_matrix (genes are now rows, samples are now columns)
    untransposed_imputed_matrix = imputed_matrix.T
    del imputed_matrix

    # Convert back to Pandas
    untransposed_imputed_matrix_df = pd.DataFrame.from_records(untransposed_imputed_matrix)
    untransposed_imputed_matrix_df.index = row_col_filtered_matrix_samples_index
    untransposed_imputed_matrix_df.columns = row_col_filtered_matrix_samples_columns
    del untransposed_imputed_matrix
    del row_col_filtered_matrix_samples_index
    del row_col_filtered_matrix_samples_columns

    # Quantile normalize imputed_matrix where genes are rows and samples are columns
    job_context['organism'] = Organism.get_object_for_name(job_context['organism_name'])
    job_context['merged_no_qn'] = untransposed_imputed_matrix_df

    # output_path = job_context['output_dir'] + "compendia_no_qn_" + str(time.time()) + ".png"
    # visualized_merged_no_qn = visualize.visualize(untransposed_imputed_matrix_df.copy(),
    #                                               output_path)

    log_state("end untranspose", job_context["job"].id, untranspose_start)
    quantile_start = log_state("start quantile normalize", job_context["job"].id)

    # Perform the Quantile Normalization
    job_context = smashing_utils.quantile_normalize(job_context, ks_check=False)
    log_state("end quantile normalize", job_context["job"].id, quantile_start)

    # Visualize Final Compendia
    # output_path = job_context['output_dir'] + "compendia_with_qn_" + str(time.time()) + ".png"
    # visualized_merged_qn = visualize.visualize(job_context['merged_qn'].copy(), output_path)

    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = ["create_compendia.py"]

    log_state("end prepare imputation", job_context["job"].id, imputation_start)
    return job_context
def _create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Writes the compendium TSV, zips the output dir with README/LICENSE,
    records ComputationalResult + annotation + CompendiumResult rows (and
    per-organism associations), then uploads the zip to S3.
    """
    result_start = log_state("start create result object", job_context["job"].id)

    result = ComputationalResult()
    result.commands.append(" ".join(job_context['formatted_command']))
    result.is_ccdl = True
    # Temporary until we re-enable the QN test step.
    result.is_public = False
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_COMPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        # Marks the job failed with a useful failure_reason.
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()

    # Write the compendia dataframe to a file
    job_context['csv_outfile'] = job_context['output_dir'] + job_context['organism_name'] + '.tsv'
    job_context['merged_qn'].to_csv(job_context['csv_outfile'], sep='\t', encoding='utf-8')

    # There is exactly one key (the primary organism) after _prepare_input.
    organism_key = list(job_context['samples'].keys())[0]
    annotation = ComputationalResultAnnotation()
    annotation.result = result
    annotation.data = {
        "organism_id": job_context['samples'][organism_key][0].organism_id,
        "organism_name": job_context['organism_name'],
        "is_qn": False,
        "is_compendia": True,
        "samples": [sample.accession_code for sample in job_context["samples"][organism_key]],
        "num_samples": len(job_context["samples"][organism_key]),
        "experiment_accessions": [e.accession_code for e in job_context['experiments']],
        "total_percent_imputed": job_context['total_percent_imputed']
    }
    annotation.save()

    # Create the resulting archive
    final_zip_base = SMASHING_DIR + str(job_context["dataset"].pk) + "_compendia"
    # Copy LICENSE.txt and correct README.md files.
    if job_context["dataset"].quant_sf_only:
        readme_file = "/home/user/README_QUANT.md"
    else:
        readme_file = "/home/user/README_NORMALIZED.md"
    shutil.copy(readme_file, job_context["output_dir"] + "/README.md")
    shutil.copy("/home/user/LICENSE_DATASET.txt", job_context["output_dir"] + "/LICENSE.TXT")
    archive_path = shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])

    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = archive_path.split('/')[-1]
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.save()

    # Compendia Result Helpers
    primary_organism = Organism.get_object_for_name(job_context['primary_organism'])
    organisms = [Organism.get_object_for_name(organism) for organism in job_context["all_organisms"]]
    # Version = 1 + number of existing non-quant compendia for this organism.
    compendium_version = CompendiumResult.objects.filter(
        primary_organism=primary_organism,
        quant_sf_only=False
    ).count() + 1

    # Save Compendia Result
    compendium_result = CompendiumResult()
    compendium_result.quant_sf_only = job_context["dataset"].quant_sf_only
    compendium_result.svd_algorithm = job_context['dataset'].svd_algorithm
    compendium_result.compendium_version = compendium_version
    compendium_result.result = result
    compendium_result.primary_organism = primary_organism
    compendium_result.save()

    # create relations to all organisms contained in the compendia
    compendium_result_organism_associations = []
    for compendium_organism in organisms:
        compendium_result_organism_association = CompendiumResultOrganismAssociation()
        compendium_result_organism_association.compendium_result = compendium_result
        compendium_result_organism_association.organism = compendium_organism
        compendium_result_organism_associations.append(
            compendium_result_organism_association)

    CompendiumResultOrganismAssociation.objects.bulk_create(
        compendium_result_organism_associations)

    job_context['compendium_result'] = compendium_result

    logger.info("Compendium created!",
                archive_path=archive_path,
                organism_name=job_context['organism_name'])

    # Upload the result to S3
    timestamp = str(int(time.time()))
    key = job_context['organism_name'] + "_" + str(compendium_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, key)

    job_context['result'] = result
    job_context['computed_files'] = [archive_computed_file]
    job_context['success'] = True
    log_state("end create result object", job_context["job"].id, result_start)

    # TEMPORARY for iterating on compendia more quickly.
    # Reset this so the end_job does clean up the job's non-input-data stuff.
    job_context["work_dir"] = job_context["old_work_dir"]

    return job_context
def create_compendia(job_id: int) -> None:
    """Run the full compendia pipeline for the given processor job id."""
    pipeline = Pipeline(name=PipelineEnum.CREATE_COMPENDIA.value)
    steps = [
        utils.start_job,
        _prepare_input,
        _prepare_frames,
        _perform_imputation,
        smashing_utils.write_non_data_files,
        _create_result_objects,
        utils.end_job,
    ]
    return utils.run_pipeline({"job_id": job_id, "pipeline": pipeline}, steps)
<|code_end|>
workers/data_refinery_workers/processors/create_quantpendia.py
<|code_start|>import os
import logging
import shutil
import time
from typing import Dict, List
import psutil
from django.utils import timezone
from django.conf import settings
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (ComputationalResult,
ComputedFile,
Organism,
Pipeline,
Sample,
CompendiumResult)
from data_refinery_common.utils import get_env_variable, FileUtils
from data_refinery_workers.processors import smashing_utils, utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")  # upload target for the quantpendia zip
SMASHING_DIR = "/home/user/data_store/smashed/"  # container-local scratch space shared with the smasher
logger = get_and_configure_logger(__name__)
# NOTE(review): forces DEBUG regardless of deployment log configuration.
logger.setLevel(logging.getLevelName('DEBUG'))
def create_quantpendia(job_id: int) -> None:
    """Run the quantpendia (quant.sf archive) pipeline for a processor job."""
    pipeline = Pipeline(name=PipelineEnum.CREATE_QUANTPENDIA.value)
    steps = [
        utils.start_job,
        _make_dirs,
        _download_files,
        _add_metadata,
        _make_archive,
        _create_result_objects,
        _remove_job_dir,
        utils.end_job,
    ]
    return utils.run_pipeline({"job_id": job_id, "pipeline": pipeline}, steps)
@utils.cache_keys('time_start', 'num_samples', 'time_end', 'formatted_command', work_dir_key='job_dir')
def _download_files(job_context: Dict) -> Dict:
    """Sync every sample's quant.sf file into the dataset output folder.

    job_context['samples'] maps a key (accession/species) to its samples;
    each key gets its own subdirectory. Records timing, the command name,
    and the total number of files downloaded on the context.
    """
    job_context['filtered_samples'] = {}
    job_context['time_start'] = timezone.now()

    downloaded = 0
    for key, samples in job_context['samples'].items():
        sample_dir = job_context['output_dir'] + key + '/'
        os.makedirs(sample_dir, exist_ok=True)

        logger.debug("Downloading quant.sf files for quantpendia.",
                     accession_code=key,
                     job_id=job_context['job_id'],
                     **get_process_stats())

        # download quant.sf files directly into the dataset folder
        downloaded += smashing_utils.sync_quant_files(sample_dir, samples)

    job_context['num_samples'] = downloaded
    job_context['time_end'] = timezone.now()
    job_context['formatted_command'] = "create_quantpendia.py"

    logger.debug("Finished downloading quant.sf files for quantpendia.",
                 job_id=job_context['job_id'],
                 total_downloaded_files=downloaded,
                 **get_process_stats())

    return job_context
@utils.cache_keys('metadata', work_dir_key='job_dir')
def _add_metadata(job_context: Dict) -> Dict:
    """Write the aggregated metadata files and the quant-only README."""
    logger.debug("Writing metadata for quantpendia.",
                 job_id=job_context['job_id'],
                 **get_process_stats())

    smashing_utils.write_non_data_files(job_context)

    output_dir = job_context["output_dir"]
    shutil.copy("/home/user/README_QUANT.md", output_dir + "/README.md")

    return job_context
@utils.cache_keys('archive_path', work_dir_key='job_dir')
def _make_archive(job_context: Dict):
    """Zip the output directory into the job dir and record 'archive_path'."""
    organism = _get_organisms(job_context['samples']).first()

    logger.debug("Generating archive.",
                 job_id=job_context['job_id'],
                 organism_name=organism.name,
                 **get_process_stats())

    zip_base = job_context['job_dir'] + organism.name + "_rnaseq_compendia"
    archive_path = shutil.make_archive(zip_base, 'zip', job_context["output_dir"])

    logger.debug("Quantpendia zip file generated.",
                 job_id=job_context['job_id'],
                 organism_name=organism.name,
                 **get_process_stats())

    return {**job_context, 'archive_path': archive_path}
def _create_result_objects(job_context: Dict) -> Dict:
    """
    Store and host the result as a ComputationalResult object.

    Records the ComputationalResult, the archive ComputedFile, and the
    CompendiumResult for this quantpendia, then uploads the zip to S3.
    """
    archive_path = job_context['archive_path']
    compendia_organism = _get_organisms(job_context['samples']).first()
    compendia_version = _get_next_compendia_version(compendia_organism)

    result = ComputationalResult()
    # BUGFIX: 'formatted_command' is a plain string here (set in
    # _download_files). The previous `" ".join(...)` on a string produced
    # "c r e a t e _ q u a n t p e n d i a . p y"; only join when given
    # a sequence of command parts.
    command = job_context['formatted_command']
    result.commands.append(command if isinstance(command, str) else " ".join(command))
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context['time_start']
    result.time_end = job_context['time_end']
    try:
        processor_key = "CREATE_QUANTPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        # Marks the job failed with a useful failure_reason.
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()

    archive_computed_file = ComputedFile()
    archive_computed_file.absolute_file_path = archive_path
    archive_computed_file.filename = FileUtils.get_filename(archive_path)
    archive_computed_file.calculate_sha1()
    archive_computed_file.calculate_size()
    archive_computed_file.is_smashable = False
    archive_computed_file.is_qn_target = False
    archive_computed_file.result = result
    archive_computed_file.is_compendia = True
    archive_computed_file.quant_sf_only = True
    archive_computed_file.compendia_organism = compendia_organism
    archive_computed_file.compendia_version = compendia_version
    archive_computed_file.save()

    compendium_result = CompendiumResult()
    compendium_result.quant_sf_only = True
    compendium_result.result = result
    compendium_result.primary_organism = compendia_organism
    compendium_result.compendium_version = compendia_version
    compendium_result.save()

    logger.info("Quantpendia created! Uploading to S3.",
                job_id=job_context['job_id'],
                archive_path=archive_path,
                organism_name=compendia_organism.name,
                **get_process_stats())

    # Upload the result to S3
    timestamp = str(int(time.time()))
    s3_key = compendia_organism.name + "_" + str(compendia_version) + "_" + timestamp + ".zip"
    archive_computed_file.sync_to_s3(S3_BUCKET_NAME, s3_key)

    job_context['result'] = result
    job_context['success'] = True
    return job_context
def _remove_job_dir(job_context: Dict):
    """ remove the directory when the job is successful. At this point
    the quantpendia was already zipped and uploaded. """
    if not settings.RUNNING_IN_CLOUD:
        # don't remove the files when running locally or for tests
        return job_context

    shutil.rmtree(job_context["job_dir"], ignore_errors=True)
    return job_context
def _make_dirs(job_context: Dict):
    """Create the per-dataset job and output directories.

    Adds 'job_dir' and 'output_dir' to the job context. Uses the
    module-level SMASHING_DIR constant (same value as before) instead of
    duplicating the hard-coded path, keeping it consistent with the other
    smasher jobs.
    """
    dataset_id = str(job_context["dataset"].pk)
    job_context["job_dir"] = SMASHING_DIR + dataset_id + "/"
    os.makedirs(job_context["job_dir"], exist_ok=True)
    job_context["output_dir"] = job_context["job_dir"] + "output/"
    os.makedirs(job_context["output_dir"], exist_ok=True)
    return job_context
def get_process_stats():
    """Return current system CPU percentage and this process's RSS in GB."""
    bytes_per_gb = 1024 ** 3
    rss_gb = psutil.Process(os.getpid()).memory_info().rss / bytes_per_gb
    return {'total_cpu': psutil.cpu_percent(), 'process_ram': rss_gb}
def _get_organisms(aggregated_samples: Dict[str, Sample]) -> List[Organism]:
    """Collect the distinct organisms across all of the sample querysets."""
    organism_ids = set()
    for samples in aggregated_samples.values():
        organism_ids.update(samples.values_list('organism__id', flat=True).distinct())
    return Organism.objects.filter(id__in=list(organism_ids))
def _get_next_compendia_version(organism: Organism) -> int:
    """Return the next quant-only compendia version for `organism` (1-based)."""
    previous = ComputedFile.objects \
        .filter(is_compendia=True, quant_sf_only=True, compendia_organism=organism) \
        .order_by('-compendia_version') \
        .first()
    # No previous compendia means this is the first one we generate.
    return previous.compendia_version + 1 if previous else 1
<|code_end|>
workers/data_refinery_workers/processors/smashing_utils.py
<|code_start|># -*- coding: utf-8 -*-
import csv
import logging
import math
import os
import multiprocessing
import shutil
import time
from pathlib import Path
from typing import Dict, List, Tuple
from concurrent.futures import ThreadPoolExecutor
from django.utils import timezone
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
import numpy as np
import pandas as pd
import psutil
import rpy2.robjects as ro
import simplejson as json
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputedFile, Sample
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
# Use roughly half the cores (minus one), but never fewer than one worker.
MULTIPROCESSING_MAX_THREAD_COUNT = max(1, math.floor(multiprocessing.cpu_count()/2) - 1)
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
# Email bodies are loaded once at import time; the paths are relative to the
# workers' working directory.
BODY_HTML = Path(
    'data_refinery_workers/processors/smasher_email.min.html'
).read_text().replace('\n', '')
BODY_ERROR_HTML = Path(
    'data_refinery_workers/processors/smasher_email_error.min.html'
).read_text().replace('\n', '')
BYTES_IN_GB = 1024 * 1024 * 1024
QN_CHUNK_SIZE = 10000  # rows per chunk when quantile normalizing
logger = get_and_configure_logger(__name__)
### DEBUG ###
# NOTE(review): forces DEBUG regardless of deployment log configuration,
# which also enables all of the log_state() instrumentation below.
logger.setLevel(logging.getLevelName('DEBUG'))
def log_state(message, job_id, start_time=False):
    """Debug-log `message` plus CPU/RAM usage for the current process.

    Call once without `start_time` to get a timestamp back; pass that
    timestamp on the matching "end" call to also log the elapsed duration.
    Does nothing (returns None) when DEBUG logging is disabled.
    """
    if logger.isEnabledFor(logging.DEBUG):
        rss_gb = psutil.Process(os.getpid()).memory_info().rss / BYTES_IN_GB
        logger.debug(message,
                     total_cpu=psutil.cpu_percent(),
                     process_ram=rss_gb,
                     job_id=job_id)
        if not start_time:
            return time.time()
        logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
def prepare_files(job_context: Dict) -> Dict:
    """
    Fetches and prepares the files to smash.

    For each key (species name or experiment accession) in
    job_context['samples'], collects the most recent smashable result file
    per sample into job_context['input_files']. Samples without one (or
    whose file was already seen) are recorded in
    job_context['filtered_samples'] with a reason. Also creates a fresh
    work/output directory for the dataset.

    Raises:
        utils.ProcessorJobError: when no sample has a smashable file.
    """
    start_prepare_files = log_state("start prepare files", job_context["job"].id)
    found_files = False

    job_context['filtered_samples'] = {}
    job_context['input_files'] = {}
    # `key` can either be the species name or experiment accession.
    for key, samples in job_context["samples"].items():
        smashable_files = []
        seen_files = set()
        for sample in samples:
            smashable_file = sample.get_most_recent_smashable_result_file()
            if smashable_file is not None and smashable_file not in seen_files:
                # Append in place; the old `list + [item]` pattern rebuilt
                # the list every iteration and was accidentally O(n^2).
                smashable_files.append((smashable_file, sample))
                seen_files.add(smashable_file)
                found_files = True
            else:
                sample_metadata = sample.to_metadata_dict()
                job_context['filtered_samples'][sample.accession_code] = {
                    **sample_metadata,
                    'reason': 'This sample did not have a processed file associated with it in our database.',
                    'experiment_accession_code': get_experiment_accession(sample.accession_code, job_context['dataset'].data)
                }

        job_context['input_files'][key] = smashable_files

    job_context['num_input_files'] = len(job_context['input_files'])
    job_context['group_by_keys'] = list(job_context['input_files'].keys())

    if not found_files:
        raise utils.ProcessorJobError("Couldn't get any files to smash for Smash job!!",
                                      success=False,
                                      dataset_id=job_context['dataset'].id,
                                      num_samples=len(job_context["samples"]))

    dataset_id = str(job_context["dataset"].pk)
    job_context["work_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    # Ensure we have a fresh smash directory
    shutil.rmtree(job_context["work_dir"], ignore_errors=True)
    os.makedirs(job_context["work_dir"])

    job_context["output_dir"] = job_context["work_dir"] + "output/"
    os.makedirs(job_context["output_dir"])
    log_state("end prepare files", job_context["job"].id, start_prepare_files)
    return job_context
def _load_and_sanitize_file(computed_file_path) -> pd.DataFrame:
    """ Read and sanitize a computed file.

    Loads a tab-separated expression file (gene identifiers in the first
    column) and cleans the gene index: strips Brainarray '_at' suffix
    residue, drops Affymetrix control probes, strips version suffixes,
    and averages duplicated gene rows.
    """
    # NOTE(review): error_bad_lines was removed in pandas 2.0 (replaced by
    # on_bad_lines) — this code assumes an older pandas; confirm the pinned
    # version before upgrading.
    data = pd.read_csv(computed_file_path,
                       sep='\t',
                       header=0,
                       index_col=0,
                       dtype={0: str, 1: np.float32},
                       error_bad_lines=False)

    # Strip any funky whitespace
    data.columns = data.columns.str.strip()
    data = data.dropna(axis='columns', how='all')

    # Make sure the index type is correct
    data.index = data.index.map(str)

    # Ensure that we don't have any dangling Brainarray-generated probe symbols.
    # BA likes to leave '_at', signifying probe identifiers,
    # on their converted, non-probe identifiers. It makes no sense.
    # So, we chop them off and don't worry about it.
    # NOTE(review): this replaces '_at' anywhere in the identifier, not only
    # as a suffix — confirm that is acceptable for all identifier schemes.
    data.index = data.index.str.replace('_at', '')

    # Remove any lingering Affymetrix control probes ("AFFX-")
    data = data[~data.index.str.contains('AFFX-')]

    # If there are any _versioned_ gene identifiers, remove that
    # version information. We're using the latest brainarray for everything anyway.
    # Jackie says this is okay.
    # She also says that in the future, we may only want to do this
    # for cross-technology smashes.

    # This regex needs to be able to handle EGIDs in the form:
    #       ENSGXXXXYYYZZZZ.6
    # and
    #       fgenesh2_kg.7__3016__AT5G35080.1 (via http://plants.ensembl.org/Arabidopsis_lyrata/ \
    #       Gene/Summary?g=fgenesh2_kg.7__3016__AT5G35080.1;r=7:17949732-17952000;t=fgenesh2_kg. \
    #       7__3016__AT5G35080.1;db=core)
    # NOTE(review): str.replace treats the pattern as regex only on pandas
    # < 2.0 defaults; newer pandas needs regex=True — confirm pinned version.
    data.index = data.index.str.replace(r"(\.[^.]*)$", '')

    # Squish duplicated rows together.
    # XXX/TODO: Is mean the appropriate method here?
    #           We can make this an option in future.
    # Discussion here: https://github.com/AlexsLemonade/refinebio/issues/186#issuecomment-395516419
    data = data.groupby(data.index, sort=False).mean()

    return data
def process_frame(work_dir, computed_file, sample_accession_code, aggregate_by) -> pd.DataFrame:
    """ Downloads the computed file from S3 and tries to see if it's smashable.
    Returns a data frame if the file can be processed or None otherwise.

    The returned single-column frame is titled with sample_accession_code.
    When aggregating by species, already-log2 RNA-seq values are re-scaled
    per issue #330; suspiciously large non-log2 microarray values are also
    log2-transformed as a sanity check.
    """
    try:

        # Download the file to a job-specific location so it
        # won't disappear while we're using it.
        computed_file_path = computed_file.get_synced_file_path(
            path="%s%s" % (work_dir, computed_file.filename)
        )

        # Bail appropriately if this isn't a real file.
        if not computed_file_path or not os.path.exists(computed_file_path):
            logger.warning("Smasher received non-existent file path.",
                           computed_file_path=computed_file_path,
                           computed_file_id=computed_file.id)
            return None

        data = _load_and_sanitize_file(computed_file_path)

        if len(data.columns) > 2:
            # Most of the time, >1 is actually bad, but we also need to support
            # two-channel samples. I think ultimately those should be given some kind of
            # special consideration.
            logger.info("Found a frame with more than 2 columns - this shouldn't happen!",
                        computed_file_path=computed_file_path,
                        computed_file_id=computed_file.id)
            return None

        # via https://github.com/AlexsLemonade/refinebio/issues/330:
        #   aggregating by experiment -> return untransformed output from tximport
        #   aggregating by species -> log2(x + 1) tximport output
        if aggregate_by == 'SPECIES' and computed_file.has_been_log2scaled():
            data = data + 1
            data = np.log2(data)

        # Ideally done in the NO-OPPER, but sanity check here.
        if (not computed_file.has_been_log2scaled()) and (data.max() > 100).any():
            logger.info("Detected non-log2 microarray data.", computed_file_id=computed_file.id)
            data = np.log2(data)

        # Explicitly title this dataframe
        try:
            data.columns = [sample_accession_code]
        except ValueError as e:
            # This sample might have multiple channels, or something else.
            # Don't mess with it.
            logger.warn("Smasher found multi-channel column (probably) - skipping!",
                        exc_info=1,
                        computed_file_path=computed_file_path,)
            return None
        except Exception as e:
            # Okay, somebody probably forgot to create a SampleComputedFileAssociation
            # Don't mess with it.
            logger.warn("Smasher found very bad column title - skipping!",
                        exc_info=1,
                        computed_file_path=computed_file_path)
            return None

    except Exception as e:
        # NOTE(review): if get_synced_file_path itself raised,
        # computed_file_path is unbound here and this log call would
        # NameError — confirm and consider initializing it to None.
        logger.exception("Unable to smash file", file=computed_file_path)
        return None
    # TEMPORARY for iterating on compendia more quickly.
    # finally:
    #     # Delete before archiving the work dir
    #     if computed_file_path and os.path.exists(computed_file_path):
    #         os.remove(computed_file_path)

    return data
def load_first_pass_data_if_cached(work_dir: str):
    """Load the cached first-pass results from work_dir/first_pass.csv.

    Returns a dict with 'gene_ids', 'microarray_columns' and
    'rnaseq_columns' (one CSV row each, in that order), or None when the
    cache file does not exist yet.
    """
    cache_path = os.path.join(work_dir, 'first_pass.csv')
    try:
        with open(cache_path, newline='') as cache_file:
            rows = csv.reader(cache_file)
            return {'gene_ids': next(rows),
                    'microarray_columns': next(rows),
                    'rnaseq_columns': next(rows)}
    # If the file doesn't exist then the gene ids aren't cached. Any
    # other exception should be handled and higher in the stack.
    except FileNotFoundError:
        return None
def cache_first_pass(job_context: Dict,
                     gene_ids: List[str],
                     microarray_columns: List[str],
                     rnaseq_columns: List[str]):
    """Persist the first-pass results to `work_dir/first_pass.csv`.

    Writes one CSV row each for gene_ids, microarray_columns and
    rnaseq_columns so a retried job can skip the expensive first pass
    (see load_first_pass_data_if_cached).
    """
    try:
        cache_path = os.path.join(job_context['work_dir'], 'first_pass.csv')
        logger.info("Caching gene_ids, microarray_columns, and rnaseq_columns to %s",
                    cache_path,
                    job_id=job_context['job'].id)
        cache_rows = [gene_ids, microarray_columns, rnaseq_columns]
        with open(cache_path, 'w', newline='') as cache_file:
            csv.writer(cache_file).writerows(cache_rows)
    except Exception:
        # A failed cache write shouldn't fail the job — we'd only lose the
        # opportunity to skip the first pass on a retry.
        logger.exception('Error writing gene identifiers to CSV file.',
                         job_id=job_context['job'].id)
def process_frames_for_key(key: str,
                           input_files: List[Tuple[ComputedFile, Sample]],
                           job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in `input_files`.
    Will add to job_context the keys 'microarray_matrix' and
    'rnaseq_matrix' with pandas dataframes containing all of the
    samples' data. Also adds the key 'unsmashable_files' containing a
    list of paths that were determined to be unsmashable.

    Works in two passes over `input_files`: the first pass collects the set
    of gene identifiers and column names so the matrices can be preallocated
    at exactly the right size; the second pass fills the matrices in. Samples
    whose frames can't be processed are recorded in
    job_context['filtered_samples'] with a human-readable reason.
    """
    start_gene_ids = log_state("Collecting all gene identifiers for key {}".format(key),
                               job_context["job"].id)

    # Build up a list of gene identifiers because these will be the
    # rows of our matrices, and we want to preallocate them so we need
    # to know them all.

    ## We may have built this list in a previous job, check to see if it's cached:
    cached_data = load_first_pass_data_if_cached(job_context['work_dir'])
    first_pass_was_cached = False

    if cached_data:
        logger.info(("The data from the first pass was cached, so we're using "
                     "that and skipping the first pass."),
                    job_id=job_context['job'].id)

        first_pass_was_cached = True
        all_gene_identifiers = cached_data["gene_ids"]
        microarray_columns = cached_data["microarray_columns"]
        rnaseq_columns = cached_data["rnaseq_columns"]
    else:
        # First pass: count gene identifier occurrences and collect the
        # column (sample) names per technology.
        gene_identifier_counts = {}
        microarray_columns = []
        rnaseq_columns = []
        for index, (computed_file, sample) in enumerate(input_files):
            log_state('1st processing frame {}'.format(index), job_context["job"].id)
            frame_data = process_frame(job_context["work_dir"],
                                       computed_file,
                                       sample.accession_code,
                                       job_context['dataset'].aggregate_by)

            if frame_data is None:
                # we were unable to process this sample, so we drop it and
                # record why so it can be reported in the dataset metadata.
                logger.warning('Unable to smash file',
                               computed_file=computed_file.id,
                               dataset_id=job_context['dataset'].id,
                               job_id=job_context["job"].id)

                sample_metadata = sample.to_metadata_dict()
                job_context['filtered_samples'][sample.accession_code] = {
                    **sample_metadata,
                    'reason': 'The file associated with this sample did not pass the QC checks we apply before aggregating.',
                    'filename': computed_file.filename,
                    'experiment_accession_code': get_experiment_accession(sample.accession_code, job_context['dataset'].data)
                }
                continue

            # Count how many frames are in each tech so we can preallocate
            # the matrices in both directions.
            for gene_id in frame_data.index:
                if gene_id in gene_identifier_counts:
                    gene_identifier_counts[gene_id] += 1
                else:
                    gene_identifier_counts[gene_id] = 1

            # Each dataframe should only have 1 column, but it's
            # returned as a list so use extend.
            if sample.technology == 'MICROARRAY':
                microarray_columns.extend(frame_data.columns)
            elif sample.technology == 'RNA-SEQ':
                rnaseq_columns.extend(frame_data.columns)

        # We only want to use gene identifiers which are present
        # in >50% of the samples. We're doing this because a large
        # number of gene identifiers present in only a modest
        # number of experiments have leaked through. We wouldn't
        # necessarily want to do this if we'd mapped all the data
        # to ENSEMBL identifiers successfully.
        total_samples = len(microarray_columns) + len(rnaseq_columns)
        all_gene_identifiers = [gene_id for gene_id in gene_identifier_counts
                                if gene_identifier_counts[gene_id] > (total_samples * 0.5)]
        all_gene_identifiers.sort()

        # Free the counts dict — it can be very large for big compendia.
        del gene_identifier_counts

        log_template = ("Collected {0} gene identifiers for {1} across"
                        " {2} micrarry samples and {3} RNA-Seq samples.")
        log_state(log_template.format(len(all_gene_identifiers),
                                      key,
                                      len(microarray_columns),
                                      len(rnaseq_columns)),
                  job_context["job"].id,
                  start_gene_ids)

    # Temporarily only cache mouse compendia because it may not succeed.
    if not first_pass_was_cached and key == "MUS_MUSCULUS":
        cache_first_pass(job_context, all_gene_identifiers, microarray_columns, rnaseq_columns)

    start_build_matrix = log_state("Beginning to build the full matrices.",
                                   job_context["job"].id)

    # Sort the columns so that the matrices are in predictable orders.
    microarray_columns.sort()
    rnaseq_columns.sort()

    # Preallocate the matrices to be the exact size we will need. This
    # should prevent any operations from happening while we build it
    # up, so the only RAM used will be needed.
    job_context['microarray_matrix'] = pd.DataFrame(data=None,
                                                    index=all_gene_identifiers,
                                                    columns=microarray_columns,
                                                    dtype=np.float32)
    job_context['rnaseq_matrix'] = pd.DataFrame(data=None,
                                                index=all_gene_identifiers,
                                                columns=rnaseq_columns,
                                                dtype=np.float32)

    # Second pass: re-process each frame and copy its single column into the
    # preallocated matrix for its technology.
    for index, (computed_file, sample) in enumerate(input_files):
        log_state('2nd processing frame {}'.format(index), job_context["job"].id)
        frame_data = process_frame(job_context["work_dir"],
                                   computed_file,
                                   sample.accession_code,
                                   job_context['dataset'].aggregate_by)
        if frame_data is None:
            job_context['unsmashable_files'].append(computed_file.filename)
            sample_metadata = sample.to_metadata_dict()
            job_context['filtered_samples'][sample.accession_code] = {
                **sample_metadata,
                'reason': 'The file associated with this sample did not contain a vector that fit the expected dimensions of the matrix.',
                'filename': computed_file.filename,
                'experiment_accession_code': get_experiment_accession(sample.accession_code, job_context['dataset'].data)
            }
            continue

        # Align the frame's rows with the matrix rows; genes the frame lacks
        # become NaN, extra genes are dropped.
        frame_data = frame_data.reindex(all_gene_identifiers)

        # The dataframe for each sample will only have one column
        # whose header will be the accession code.
        column = frame_data.columns[0]
        if sample.technology == 'MICROARRAY':
            job_context['microarray_matrix'][column] = frame_data.values
        elif sample.technology == 'RNA-SEQ':
            job_context['rnaseq_matrix'][column] = frame_data.values

    job_context['num_samples'] = 0
    if job_context['microarray_matrix'] is not None:
        job_context['num_samples'] += len(job_context['microarray_matrix'].columns)
    if job_context['rnaseq_matrix'] is not None:
        job_context['num_samples'] += len(job_context['rnaseq_matrix'].columns)

    log_state("Built full matrices for key {}".format(key),
              job_context["job"].id,
              start_build_matrix)

    return job_context
# Modified from: http://yaoyao.codes/pandas/2018/01/23/pandas-split-a-dataframe-into-chunks
def _index_marks(num_columns, chunk_size):
return range(chunk_size, math.ceil(num_columns / chunk_size) * chunk_size, chunk_size)
def _split_dataframe_columns(dataframe, chunk_size):
    """Cut `dataframe` into column-wise chunks of at most `chunk_size` columns each."""
    return np.split(dataframe, _index_marks(dataframe.shape[1], chunk_size), axis=1)
def _quantile_normalize_matrix(target_vector, original_matrix):
    """Quantile normalize `original_matrix` against `target_vector`.

    Uses the Bioconductor `preprocessCore` package via rpy2 and returns a
    pandas DataFrame with the same index/columns as `original_matrix`.

    NOTE(review): the chunked branch writes normalized values back into
    `original_matrix` in place (via .iloc assignment) and returns that same
    object, while the small-matrix branch returns a new DataFrame — confirm
    callers don't rely on the input being left untouched.
    """
    preprocessCore = importr('preprocessCore')
    as_numeric = rlang("as.numeric")
    data_matrix = rlang('data.matrix')

    # Convert the smashed frames to an R numeric Matrix
    target_vector = as_numeric(target_vector)

    # Do so in chunks if the matrix is too large.
    if original_matrix.shape[1] <= QN_CHUNK_SIZE:
        merged_matrix = data_matrix(original_matrix)
        normalized_matrix = preprocessCore.normalize_quantiles_use_target(x=merged_matrix,
                                                                          target=target_vector,
                                                                          copy=True)
        # And finally convert back to Pandas
        ar = np.array(normalized_matrix)
        new_merged = pd.DataFrame(ar,
                                  columns=original_matrix.columns,
                                  index=original_matrix.index)
    else:
        # Normalize QN_CHUNK_SIZE columns at a time to bound peak memory use.
        matrix_chunks = _split_dataframe_columns(original_matrix, QN_CHUNK_SIZE)
        for i, chunk in enumerate(matrix_chunks):
            R_chunk = data_matrix(chunk)
            normalized_chunk = preprocessCore.normalize_quantiles_use_target(
                x=R_chunk,
                target=target_vector,
                copy=True
            )
            ar = np.array(normalized_chunk)
            # Write the normalized chunk back over the columns it came from.
            start_column = i * QN_CHUNK_SIZE
            end_column = (i + 1) * QN_CHUNK_SIZE
            original_matrix.iloc[:, start_column:end_column] = ar
        new_merged = original_matrix

    return new_merged
def _test_qn(merged_matrix):
    """Select up to 100 random pairs of columns and perform the KS test on them.

    Returns a list of (statistic, pvalue) tuples from the Kolmogorov-Smirnov
    tests, or None when the matrix has fewer than two columns.
    """
    # Verify this QN, related:
    # https://github.com/AlexsLemonade/refinebio/issues/599#issuecomment-422132009
    data_matrix = rlang('data.matrix')
    as_numeric = rlang("as.numeric")
    set_seed = rlang("set.seed")
    combn = rlang("combn")
    ncol = rlang("ncol")
    ks_test = rlang("ks.test")
    which = rlang("which")

    merged_R_matrix = data_matrix(merged_matrix)

    # Fix the R RNG so the selected column pairs are reproducible.
    set_seed(123)

    n = ncol(merged_R_matrix)[0]
    m = 2

    # Not enough columns to perform KS test - either bad smash or single sample smash.
    if n<m: return None

    # This won't work with larger matrices:
    # https://github.com/AlexsLemonade/refinebio/issues/1860
    ncolumns = ncol(merged_R_matrix)

    if ncolumns[0] <= 200:
        # Convert to NP, Shuffle, Return to R
        combos = combn(ncolumns, 2)
        ar = np.array(combos)
        np.random.shuffle(np.transpose(ar))
    else:
        # For big matrices, draw 100 disjoint random pairs instead of all
        # combinations to keep memory bounded.
        indexes = [*range(ncolumns[0])]
        np.random.shuffle(indexes)
        ar = np.array([*zip(indexes[0:100], indexes[100:200])])
        # NOTE(review): this array is shaped (pairs, 2) while the <=200 branch
        # produces (2, pairs); the loop below reads combos as 2 rows x N
        # columns, so this branch appears to test far fewer pairs than
        # intended — confirm against issue #1860.

    nr, nc = ar.shape
    combos = ro.r.matrix(ar, nrow=nr, ncol=nc)

    result = []
    # adapted from
    # https://stackoverflow.com/questions/9661469/r-t-test-over-all-columns
    # apply KS test to randomly selected pairs of columns (samples)
    for i in range(1, min(ncol(combos)[0], 100)):
        value1 = combos.rx(1, i)[0]
        value2 = combos.rx(2, i)[0]

        test_a = merged_R_matrix.rx(True, value1)
        test_b = merged_R_matrix.rx(True, value2)

        # RNA-seq has a lot of zeroes in it, which
        # breaks the ks_test. Therefore we want to
        # filter them out. To do this we drop the
        # lowest half of the values. If there's
        # still zeroes in there, then that's
        # probably too many zeroes so it's okay to
        # fail.
        median_a = np.median(test_a)
        median_b = np.median(test_b)

        # `which` returns indices which are
        # 1-indexed. Python accesses lists with
        # zero-indexes, even if that list is
        # actually an R vector. Therefore subtract
        # 1 to account for the difference.
        test_a = [test_a[i-1] for i in which(test_a > median_a)]
        test_b = [test_b[i-1] for i in which(test_b > median_b)]

        # The python list comprehension gives us a
        # python list, but ks_test wants an R
        # vector so let's go back.
        test_a = as_numeric(test_a)
        test_b = as_numeric(test_b)

        ks_res = ks_test(test_a, test_b)
        statistic = ks_res.rx('statistic')[0][0]
        pvalue = ks_res.rx('p.value')[0][0]

        result.append((statistic, pvalue))

    return result
def quantile_normalize(job_context: Dict, ks_check=True, ks_stat=0.001) -> Dict:
    """Quantile normalize the merged matrix against the organism's QN target.

    Pops 'merged_no_qn' from job_context and stores the normalized frame
    under 'merged_qn'. For organisms other than mouse/human, also records the
    Kolmogorov-Smirnov test results under 'ks_statistic'/'ks_pvalue', plus a
    'ks_warning' when the test fails. Marks the dataset as failed and raises
    utils.ProcessorJobError when the organism has no QN target.
    """
    organism = job_context['organism']
    dataset = job_context['dataset']

    # Without a QN target there is nothing to normalize against: record the
    # failure on the dataset before aborting the job.
    if not organism.qn_target:
        failure_reason = "Could not find QN target for Organism: " + str(organism)
        dataset.success = False
        dataset.failure_reason = failure_reason
        dataset.save()
        raise utils.ProcessorJobError(failure_reason,
                                      success=False,
                                      organism=organism,
                                      dataset_id=dataset.id)

    target_path = organism.qn_target.computedfile_set.latest().sync_from_s3()
    target_frame = pd.read_csv(target_path, sep='\t', header=None,
                               index_col=None, error_bad_lines=False)

    # The QN itself happens in R, so set up the pandas <-> R bridge first.
    pandas2ri.activate()

    # Pop the un-normalized matrix out of job_context because we no longer
    # need it once the QN'd version exists.
    unnormalized_matrix = job_context.pop('merged_no_qn')
    normalized_matrix = _quantile_normalize_matrix(target_frame[0], unnormalized_matrix)
    job_context['merged_qn'] = normalized_matrix

    # For now, don't test the QN for mouse/human. This never fails on
    # smasher jobs and is OOM-killing our very large compendia jobs. Let's
    # run this manually after we have a compendia job actually finish.
    if organism.name in ["MUS_MUSCULUS", "HOMO_SAPIENS"]:
        return job_context

    ks_results = _test_qn(normalized_matrix)
    if not ks_results:
        logger.warning("Not enough columns to perform KS test - either bad smash or single sample smash.",
                       dataset_id=dataset.id)
        return job_context

    for statistic, pvalue in ks_results:
        job_context['ks_statistic'] = statistic
        job_context['ks_pvalue'] = pvalue

        # We're unsure of how stringent to be about the pvalue just yet, so
        # we're extra lax rather than failing tons of tests. This may need
        # tuning.
        if ks_check and (statistic > ks_stat or pvalue < 0.8):
            job_context['ks_warning'] = ("Failed Kolmogorov Smirnov test! Stat: "
                                         + str(statistic) + ", PVal: " + str(pvalue))

    return job_context
def compile_metadata(job_context: Dict) -> Dict:
    """Compile the aggregated metadata describing this job's dataset.

    Returns a new dict containing the metadata, not the job_context.
    Samples that were filtered out of the matrices are excluded from both
    the 'samples' map and each experiment's sample accession list.
    """
    dataset = job_context['dataset']

    metadata = {
        'num_samples': job_context['num_samples'],
        'num_experiments': job_context['experiments'].count(),
        'quant_sf_only': dataset.quant_sf_only,
    }

    if not dataset.quant_sf_only:
        # These fields only apply to datasets that were aggregated and
        # (possibly) quantile normalized.
        metadata['aggregate_by'] = dataset.aggregate_by
        metadata['scale_by'] = dataset.scale_by
        # https://github.com/AlexsLemonade/refinebio/pull/421#discussion_r203799646
        # TODO: do something with these.
        # metadata['non_aggregated_files'] = job_context["unsmashable_files"]
        metadata['ks_statistic'] = job_context.get("ks_statistic", None)
        metadata['ks_pvalue'] = job_context.get("ks_pvalue", None)
        metadata['ks_warning'] = job_context.get("ks_warning", None)
        metadata['quantile_normalized'] = dataset.quantile_normalize

    filtered = job_context['filtered_samples']

    # Per-sample metadata, skipping the samples that were filtered out.
    metadata['samples'] = {
        sample.accession_code: sample.to_metadata_dict()
        for sample in dataset.get_samples()
        if sample.accession_code not in filtered
    }

    experiments = {}
    for experiment in dataset.get_experiments():
        experiment_metadata = experiment.to_metadata_dict()
        # Drop filtered samples from each experiment's accession list too.
        experiment_metadata['sample_accession_codes'] = [
            code for code in experiment_metadata['sample_accession_codes']
            if code not in filtered
        ]
        experiments[experiment.accession_code] = experiment_metadata
    metadata['experiments'] = experiments

    return metadata
def write_non_data_files(job_context: Dict) -> Dict:
    """Write the dataset's non-data files: README, LICENSE, and metadata.

    Compiles the job metadata into job_context['metadata'], copies the README
    and LICENSE into the output directory, and writes the metadata out as TSV
    and JSON. Failures while writing metadata are logged but don't abort the
    job.
    """
    job_context['metadata'] = compile_metadata(job_context)

    output_dir = job_context["output_dir"]
    shutil.copy("README_DATASET.md", output_dir + "README.md")
    shutil.copy("LICENSE_DATASET.txt", output_dir + "LICENSE.TXT")

    try:
        # Samples metadata as TSV (and species JSON when applicable)...
        write_tsv_json(job_context)

        # ...then all of the metadata as one JSON document.
        job_context['metadata']['created_at'] = timezone.now().strftime('%Y-%m-%dT%H:%M:%S')
        aggregated_path = os.path.join(output_dir, 'aggregated_metadata.json')
        with open(aggregated_path, 'w', encoding='utf-8') as metadata_file:
            json.dump(job_context['metadata'], metadata_file, indent=4, sort_keys=True)

        filtered_samples = job_context['filtered_samples']
        if filtered_samples:
            # Only generate the filtered-samples files when samples were
            # actually skipped.
            filtered_json_path = os.path.join(output_dir, 'filtered_samples_metadata.json')
            with open(filtered_json_path, 'w', encoding='utf-8') as metadata_file:
                json.dump(filtered_samples, metadata_file, indent=4, sort_keys=True)

            tsv_columns = get_tsv_columns(filtered_samples)
            filtered_tsv_path = os.path.join(output_dir, 'filtered_samples_metadata.tsv')
            with open(filtered_tsv_path, 'w', encoding='utf-8') as tsv_file:
                writer = csv.DictWriter(tsv_file, tsv_columns, delimiter='\t',
                                        extrasaction='ignore')
                writer.writeheader()
                for sample_metadata in filtered_samples.values():
                    writer.writerow(get_tsv_row_data(sample_metadata,
                                                     job_context["dataset"].data))
    except Exception as e:
        logger.exception("Failed to write metadata TSV!", job_id=job_context['job'].id)

    return job_context
def get_experiment_accession(sample_accession_code, dataset_data):
    """Return the accession of the experiment containing the given sample.

    `dataset_data` maps experiment accessions to lists of sample accession
    codes. Returns the empty string if no experiment contains the sample,
    which should never happen because the sample is by definition in the
    dataset.
    """
    matches = (experiment for experiment, samples in dataset_data.items()
               if sample_accession_code in samples)
    return next(matches, "")
def _add_annotation_column(annotation_columns, column_name):
"""Add annotation column names in place.
Any column_name that starts with "refinebio_" will be skipped.
"""
if not column_name.startswith("refinebio_"):
annotation_columns.add(column_name)
def _add_annotation_value(row_data, col_name, col_value, sample_accession_code):
"""Adds a new `col_name` key whose value is `col_value` to row_data.
If col_name already exists in row_data with different value, print
out a warning message.
"""
# Generate a warning message if annotation field name starts with
# "refinebio_". This should rarely (if ever) happen.
if col_name.startswith("refinebio_"):
logger.warning(
"Annotation value skipped",
annotation_field=col_name,
annotation_value=col_value,
sample_accession_code=sample_accession_code
)
elif col_name not in row_data:
row_data[col_name] = col_value
# Generate a warning message in case of conflicts of annotation values.
# (Requested by Dr. Jackie Taroni)
elif row_data[col_name] != col_value:
logger.warning(
"Conflict of values found in column %s: %s vs. %s" % (
col_name, row_data[col_name], col_value),
sample_accession_code=sample_accession_code
)
def get_tsv_row_data(sample_metadata, dataset_data):
    """Returns field values based on input sample_metadata.

    Some annotation fields are treated specially because they are more
    important. See the `get_tsv_columns` function for details. The special
    cases must stay in sync between the two functions, since the columns
    produced there are the header for the rows produced here.
    """
    sample_accession_code = sample_metadata.get('refinebio_accession_code', '')
    row_data = dict()
    for meta_key, meta_value in sample_metadata.items():
        # If the field is a refinebio-specific field, simply copy it.
        if meta_key != 'refinebio_annotations':
            row_data[meta_key] = meta_value
            continue

        # Decompose sample_metadata["refinebio_annotations"], which is
        # an array of annotations. NOTE: the order of these elif branches
        # matters — the source-database-specific cases must be checked
        # before the generic dict/list fallbacks.
        for annotation in meta_value:
            for annotation_key, annotation_value in annotation.items():
                # "characteristic" in ArrayExpress annotation
                if sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                        and annotation_key == "characteristic":
                    for pair_dict in annotation_value:
                        if 'category' in pair_dict and 'value' in pair_dict:
                            col_name, col_value = pair_dict['category'], pair_dict['value']
                            _add_annotation_value(row_data, col_name, col_value,
                                                  sample_accession_code)
                # "variable" in ArrayExpress annotation
                elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                        and annotation_key == "variable":
                    for pair_dict in annotation_value:
                        if 'name' in pair_dict and 'value' in pair_dict:
                            col_name, col_value = pair_dict['name'], pair_dict['value']
                            _add_annotation_value(row_data, col_name, col_value,
                                                  sample_accession_code)
                # Skip "source" field ArrayExpress sample's annotation
                elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                        and annotation_key == "source":
                    continue
                # "characteristics_ch1" in GEO annotation
                elif sample_metadata.get('refinebio_source_database', '') == "GEO" \
                        and annotation_key == "characteristics_ch1":  # array of strings
                    for pair_str in annotation_value:
                        if ':' in pair_str:
                            col_name, col_value = pair_str.split(':', 1)
                            col_value = col_value.strip()
                            _add_annotation_value(row_data, col_name, col_value,
                                                  sample_accession_code)
                # If annotation_value includes only a 'name' key, extract its value directly:
                elif isinstance(annotation_value, dict) \
                        and len(annotation_value) == 1 and 'name' in annotation_value:
                    _add_annotation_value(row_data, annotation_key, annotation_value['name'],
                                          sample_accession_code)
                # If annotation_value is a single-element array, extract the element directly:
                elif isinstance(annotation_value, list) and len(annotation_value) == 1:
                    _add_annotation_value(row_data, annotation_key, annotation_value[0],
                                          sample_accession_code)
                # Otherwise save all annotation fields in separate columns
                else:
                    _add_annotation_value(row_data, annotation_key, annotation_value,
                                          sample_accession_code)

    # Every row also gets the accession of the experiment it belongs to.
    row_data["experiment_accession"] = get_experiment_accession(sample_accession_code,
                                                                dataset_data)
    return row_data
def get_tsv_columns(samples_metadata):
    """Returns an array of strings that will be written as a TSV file's
    header. The columns are based on fields found in samples_metadata.

    Some nested annotation fields are taken out as separate columns
    because they are more important than the others. The special cases
    mirror those in `get_tsv_row_data`, which produces the matching rows.
    """
    refinebio_columns = set()
    annotation_columns = set()
    for sample_metadata in samples_metadata.values():
        for meta_key, meta_value in sample_metadata.items():
            if meta_key != 'refinebio_annotations':
                refinebio_columns.add(meta_key)
                continue

            # Decompose sample_metadata["annotations"], which is an array of annotations!
            for annotation in meta_value:
                for annotation_key, annotation_value in annotation.items():
                    # For ArrayExpress samples, take out the fields
                    # nested in "characteristic" as separate columns.
                    if sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                            and annotation_key == "characteristic":
                        for pair_dict in annotation_value:
                            if 'category' in pair_dict and 'value' in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict['category'])
                    # For ArrayExpress samples, also take out the fields
                    # nested in "variable" as separate columns.
                    elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                            and annotation_key == "variable":
                        for pair_dict in annotation_value:
                            if 'name' in pair_dict and 'value' in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict['name'])
                    # For ArrayExpress samples, skip "source" field
                    elif sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS" \
                            and annotation_key == "source":
                        continue
                    # For GEO samples, take out the fields nested in
                    # "characteristics_ch1" as separate columns.
                    elif sample_metadata.get('refinebio_source_database', '') == "GEO" \
                            and annotation_key == "characteristics_ch1":  # array of strings
                        for pair_str in annotation_value:
                            if ':' in pair_str:
                                tokens = pair_str.split(':', 1)
                                _add_annotation_column(annotation_columns, tokens[0])
                    # Saves all other annotation fields in separate columns
                    else:
                        _add_annotation_column(annotation_columns, annotation_key)

    # Return sorted columns, in which "refinebio_accession_code" and "experiment_accession" are
    # always first, followed by the other refinebio columns (in alphabetic order), and
    # annotation columns (in alphabetic order) at the end.
    refinebio_columns.discard('refinebio_accession_code')
    return ['refinebio_accession_code', 'experiment_accession'] + sorted(refinebio_columns) \
        + sorted(annotation_columns)
def write_tsv_json(job_context):
    """Writes tsv files on disk.

    If the dataset is aggregated by species, also write species-level
    JSON file.

    Returns the list of TSV paths written.
    """
    # Avoid pulling this out of job_context repeatedly.
    metadata = job_context['metadata']

    # Uniform TSV header per dataset
    columns = get_tsv_columns(metadata['samples'])

    # Per-Experiment Metadata
    if job_context["dataset"].aggregate_by == "EXPERIMENT":
        tsv_paths = []
        for experiment_title, experiment_data in metadata['experiments'].items():
            experiment_dir = job_context["output_dir"] + experiment_title + '/'
            # Strip non-ascii characters from the directory and file names so
            # the paths are filesystem-safe.
            # NOTE(review): this leaves experiment_dir/tsv_path as bytes,
            # unlike the str paths the other branches return — confirm
            # callers handle both.
            experiment_dir = experiment_dir.encode('ascii', 'ignore')
            os.makedirs(experiment_dir, exist_ok=True)
            tsv_path = experiment_dir.decode("utf-8") + 'metadata_' + experiment_title + '.tsv'
            tsv_path = tsv_path.encode('ascii', 'ignore')
            tsv_paths.append(tsv_path)
            with open(tsv_path, 'w', encoding='utf-8') as tsv_file:
                dw = csv.DictWriter(tsv_file, columns, delimiter='\t', extrasaction='ignore')
                dw.writeheader()
                # Only write the samples that belong to this experiment.
                for sample_accession_code, sample_metadata in metadata['samples'].items():
                    if sample_accession_code in experiment_data['sample_accession_codes']:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
        return tsv_paths
    # Per-Species Metadata
    elif job_context["dataset"].aggregate_by == "SPECIES":
        tsv_paths = []
        for species in job_context['group_by_keys']:
            species_dir = job_context["output_dir"] + species + '/'
            os.makedirs(species_dir, exist_ok=True)
            samples_in_species = []
            tsv_path = species_dir + "metadata_" + species + '.tsv'
            tsv_paths.append(tsv_path)
            with open(tsv_path, 'w', encoding='utf-8') as tsv_file:
                # See http://www.lucainvernizzi.net/blog/2015/08/03/8x-speed-up-for-python-s-csv-dictwriter/
                # about extrasaction.
                dw = csv.DictWriter(tsv_file, columns, delimiter='\t', extrasaction='ignore')
                dw.writeheader()
                i = 0
                for sample_metadata in metadata['samples'].values():
                    if sample_metadata.get('refinebio_organism', '') == species:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
                        samples_in_species.append(sample_metadata)

                    # Species-level metadata can cover very many samples, so
                    # log progress every 1000 rows.
                    i = i + 1
                    if i % 1000 == 0:
                        progress_template = ('Done with {0} out of {1} lines of metadata '
                                             'for species {2}')
                        log_state(progress_template.format(i, len(metadata['samples']), species),
                                  job_context['job'].id)

            # Writes a json file for current species:
            if len(samples_in_species):
                species_metadata = {
                    'species': species,
                    'samples': samples_in_species
                }
                json_path = species_dir + "metadata_" + species + '.json'
                with open(json_path, 'w', encoding='utf-8') as json_file:
                    json.dump(species_metadata, json_file, indent=4, sort_keys=True)
        return tsv_paths
    # All Metadata
    else:
        all_dir = job_context["output_dir"] + "ALL/"
        os.makedirs(all_dir, exist_ok=True)
        tsv_path = all_dir + 'metadata_ALL.tsv'
        with open(tsv_path, 'w', encoding='utf-8') as tsv_file:
            dw = csv.DictWriter(tsv_file, columns, delimiter='\t', extrasaction='ignore')
            dw.writeheader()
            for sample_metadata in metadata['samples'].values():
                row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                dw.writerow(row_data)
        return [tsv_path]
def download_computed_file(download_tuple: Tuple[ComputedFile, str]):
    """Download one computed file to the given path.

    Receives a tuple of (computed file, destination path) so the function
    can be mapped over by an executor; this is used to parallelize
    downloading quant.sf files. Sync errors are logged and swallowed so one
    bad file doesn't fail the whole download batch.
    """
    (latest_computed_file, output_file_path) = download_tuple
    try:
        latest_computed_file.get_synced_file_path(path=output_file_path)
    # Catch Exception rather than using a bare `except:` so that
    # KeyboardInterrupt/SystemExit can still propagate and stop the worker.
    except Exception:
        # Let's not fail if there's an error syncing one of the quant.sf files
        logger.exception('Failed to sync computed file', computed_file_id=latest_computed_file.pk)
def sync_quant_files(output_path, samples: List[Sample]):
    """ Takes a list of ComputedFiles and copies the ones that are quant files to the provided directory.
    Returns the total number of samples that were included """
    num_samples = 0

    page_size = 100
    # Work through the samples in pages so we don't query the db for all of
    # them at once, then download each page's computed files in parallel.
    with ThreadPoolExecutor(max_workers=MULTIPROCESSING_MAX_THREAD_COUNT) as executor:
        for page_start in range(0, len(samples), page_size):
            # BUGFIX: this slice used to be `samples[i * page_size:i + page_size]`
            # with i stepping by page_size, which produced an empty page for
            # every iteration after the first — only the first 100 samples
            # were ever synced.
            sample_page = samples[page_start:page_start + page_size]

            sample_and_computed_files = []
            for sample in sample_page:
                latest_computed_file = sample.get_most_recent_quant_sf_file()
                if not latest_computed_file:
                    continue
                output_file_path = output_path + sample.accession_code + "_quant.sf"
                sample_and_computed_files.append((latest_computed_file, output_file_path))

            # download this set of files, this will take a few seconds that should also help the db recover
            executor.map(download_computed_file, sample_and_computed_files)
            num_samples += len(sample_and_computed_files)

    return num_samples
<|code_end|>
|
Smasher jobs don't mark datasets as failures when they fail
### Context
There are a couple of places in the smasher code where we delay failing the job so that it can make it to the `_notify()` function, which lets the user know that their dataset failed to process.
### Problem or idea
The job eventually gets marked as a failure like we want. However the dataset object does not, nor does the failure_reason get set on the dataset object. This means that the frontend just forever shows the dataset as processing.
### Solution or next step
Fix this bug by restructuring the smasher's flow — the current delayed-failure approach is a hack. We should have two notify functions: one for successes and one for failures. If the job fails, we should call the `_notify_failure()` function and then properly exit the processor, setting the `failure_reason` and `success` fields of the dataset object. If the job succeeds, the pipeline should be allowed to continue on to the `_notify_success()` function.
| workers/data_refinery_workers/processors/smasher.py
<|code_start|># -*- coding: utf-8 -*-
import boto3
import csv
import os
import rpy2
import rpy2.robjects as ro
import shutil
import simplejson as json
import string
import warnings
import requests
import psutil
import logging
import time
from botocore.exceptions import ClientError
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from pathlib import Path
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
from sklearn import preprocessing
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
OriginalFile,
Pipeline,
SampleResultAssociation,
Dataset,
)
from data_refinery_common.utils import get_env_variable, calculate_file_size, calculate_sha1
from data_refinery_workers.processors import utils, smashing_utils
from urllib.parse import quote
# S3 buckets for smashed results and source data; overridable via env vars.
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")

# Pre-rendered notification email bodies, collapsed onto a single line.
# These are read at import time, so the worker fails fast if they're missing.
BODY_HTML = (
    Path("data_refinery_workers/processors/smasher_email.min.html").read_text().replace("\n", "")
)
BODY_ERROR_HTML = (
    Path("data_refinery_workers/processors/smasher_email_error.min.html")
    .read_text()
    .replace("\n", "")
)

BYTES_IN_GB = 1024 * 1024 * 1024

logger = get_and_configure_logger(__name__)
### DEBUG ###
# NOTE(review): this unconditionally forces DEBUG-level logging (log_state's
# telemetry depends on it); it looks like leftover debug configuration —
# confirm whether it should ship enabled.
logger.setLevel(logging.getLevelName("DEBUG"))

# Leave roughly half the cores free for the worker's other threads.
PROCESS_POOL_SIZE = max(1, int(psutil.cpu_count() / 2 - 1))

# Supported scale_by options mapped to their sklearn scaler classes.
SCALERS = {
    "MINMAX": preprocessing.MinMaxScaler,
    "STANDARD": preprocessing.StandardScaler,
    "ROBUST": preprocessing.RobustScaler,
}
def log_state(message, job_id, start_time=False):
    # Lightweight debug telemetry: logs `message` together with process-wide
    # CPU usage and this process's RAM usage. Everything is skipped unless
    # DEBUG logging is enabled.
    #
    # When called without `start_time`, returns a timestamp (time.time()) to
    # be passed back in by a later call, which then logs the elapsed
    # duration instead of returning anything.
    # NOTE(review): when DEBUG logging is disabled this returns None, so a
    # later paired call receives start_time=None and just returns a fresh
    # timestamp — confirm callers don't rely on the duration in that case.
    if logger.isEnabledFor(logging.DEBUG):
        process = psutil.Process(os.getpid())
        ram_in_GB = process.memory_info().rss / BYTES_IN_GB
        logger.debug(message, total_cpu=psutil.cpu_percent(), process_ram=ram_in_GB, job_id=job_id)

        if start_time:
            logger.debug("Duration: %s" % (time.time() - start_time), job_id=job_id)
        else:
            return time.time()
def _inner_join(job_context: Dict) -> pd.DataFrame:
"""Performs an inner join across the all_frames key of job_context.
Returns a dataframe, not the job_context.
TODO: This function should be mostly unnecessary now because we
pretty much do this in the smashing utils but I don't want to rip
it out right now .
"""
# Merge all of the frames we've gathered into a single big frame, skipping duplicates.
# TODO: If the very first frame is the wrong platform, are we boned?
merged = job_context["all_frames"][0]
i = 1
old_len_merged = len(merged)
merged_backup = merged
while i < len(job_context["all_frames"]):
frame = job_context["all_frames"][i]
i = i + 1
if i % 1000 == 0:
logger.info("Smashing keyframe", i=i, job_id=job_context["job"].id)
# I'm not sure where these are sneaking in from, but we don't want them.
# Related: https://github.com/AlexsLemonade/refinebio/issues/390
breaker = False
for column in frame.columns:
if column in merged.columns:
breaker = True
if breaker:
logger.warning(
"Column repeated for smash job!",
dataset_id=job_context["dataset"].id,
job_id=job_context["job"].id,
column=column,
)
continue
# This is the inner join, the main "Smash" operation
merged = merged.merge(frame, how="inner", left_index=True, right_index=True)
new_len_merged = len(merged)
if new_len_merged < old_len_merged:
logger.warning(
"Dropped rows while smashing!",
dataset_id=job_context["dataset"].id,
old_len_merged=old_len_merged,
new_len_merged=new_len_merged,
)
if new_len_merged == 0:
logger.warning(
"Skipping a bad merge frame!",
dataset_id=job_context["dataset"].id,
job_id=job_context["job"].id,
old_len_merged=old_len_merged,
new_len_merged=new_len_merged,
bad_frame_number=i,
)
merged = merged_backup
new_len_merged = len(merged)
try:
job_context["unsmashable_files"].append(frame.columns[0])
except Exception:
# Something is really, really wrong with this frame.
pass
old_len_merged = len(merged)
merged_backup = merged
return merged
def process_frames_for_key(key: str, input_files: List[ComputedFile], job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.
    `key` is the species or experiment whose samples are contained in `input_files`.
    Will add to job_context the key 'all_frames', a list of pandas
    dataframes containing all the samples' data. Appends the filename of
    any file that could not be processed to the (pre-existing)
    job_context['unsmashable_files'] list.
    """
    job_context["original_merged"] = pd.DataFrame()
    start_all_frames = log_state(
        "Building list of all_frames key {}".format(key), job_context["job"].id
    )
    job_context["all_frames"] = []
    # Each entry is a (ComputedFile, Sample) pair; process_frame returns a
    # single-column dataframe named after the sample accession, or None.
    for (computed_file, sample) in input_files:
        frame_data = smashing_utils.process_frame(
            job_context["work_dir"],
            computed_file,
            sample.accession_code,
            job_context["dataset"].aggregate_by,
        )
        if frame_data is not None:
            job_context["all_frames"].append(frame_data)
        else:
            # The file failed QC inside process_frame; record it but keep going.
            logger.warning(
                "Unable to smash file",
                computed_file=computed_file.id,
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
            )
            job_context["unsmashable_files"].append(computed_file.filename)
    log_state(
        "Finished building list of all_frames key {}".format(key),
        job_context["job"].id,
        start_all_frames,
    )
    return job_context
def _smash_key(job_context: Dict, key: str, input_files: List[ComputedFile]) -> Dict:
    """Smash all of the input files together for a given key.
    Steps:
    Combine common genes (pandas merge)
    Transpose such that genes are columns (features)
    Scale features with sci-kit learn
    Transpose again such that samples are columns and genes are rows

    Writes the result to `<output_dir>/<key>/<key>.tsv` and records the path
    under job_context['smash_outfile']. For quant-sf-only datasets the quant
    files are simply copied and no matrix is built.
    """
    start_smash = log_state("start _smash_key for {}".format(key), job_context["job"].id)
    # Check if we need to copy the quant.sf files
    if job_context["dataset"].quant_sf_only:
        outfile_dir = job_context["output_dir"] + key + "/"
        os.makedirs(outfile_dir, exist_ok=True)
        samples = [sample for (_, sample) in input_files]
        job_context["num_samples"] += smashing_utils.sync_quant_files(outfile_dir, samples)
        # we ONLY want to give quant sf files to the user if that's what they requested
        return job_context
    job_context = process_frames_for_key(key, input_files, job_context)
    if len(job_context["all_frames"]) < 1:
        logger.error(
            "Was told to smash a key with no frames!", job_id=job_context["job"].id, key=key
        )
        # TODO: is this the proper way to handle this? I can see us
        # not wanting to fail an entire dataset because one experiment
        # had a problem, but I also think it could be problematic to
        # just skip an experiment and pretend nothing went wrong.
        return job_context
    merged = _inner_join(job_context)
    job_context["original_merged"] = merged
    log_state("end build all frames", job_context["job"].id, start_smash)
    start_qn = log_state("start qn", job_context["job"].id, start_smash)
    # Quantile Normalization
    if job_context["dataset"].quantile_normalize:
        try:
            job_context["merged_no_qn"] = merged
            job_context["organism"] = job_context["dataset"].get_samples().first().organism
            job_context = smashing_utils.quantile_normalize(job_context)
            merged = job_context.get("merged_qn", None)
            # We probably don't have an QN target or there is another error,
            # so let's fail gracefully.
            assert merged is not None, "Problem occured during quantile normalization: No merged_qn"
        except Exception as e:
            logger.exception(
                "Problem occured during quantile normalization",
                dataset_id=job_context["dataset"].id,
                processor_job_id=job_context["job"].id,
            )
            job_context["dataset"].success = False
            # Keep the first failure reason if one was already recorded.
            if not job_context["job"].failure_reason:
                job_context["job"].failure_reason = "Failure reason: " + str(e)
            job_context["dataset"].failure_reason = "Failure reason: " + str(e)
            job_context["dataset"].save()
            # Delay failing this pipeline until the failure notify has been sent
            job_context["job"].success = False
            job_context["failure_reason"] = str(e)
            return job_context
    # End QN
    log_state("end qn", job_context["job"].id, start_qn)
    # Transpose before scaling
    # Do this even if we don't want to scale in case transpose
    # modifies the data in any way. (Which it shouldn't but
    # we're paranoid.)
    # TODO: stop the paranoia because Josh has alleviated it.
    transposed = merged.transpose()
    start_scaler = log_state("starting scaler", job_context["job"].id)
    # Scaler
    if job_context["dataset"].scale_by != "NONE":
        scale_funtion = SCALERS[job_context["dataset"].scale_by]
        scaler = scale_funtion(copy=True)
        scaler.fit(transposed)
        scaled = pd.DataFrame(
            scaler.transform(transposed), index=transposed.index, columns=transposed.columns
        )
        # Untranspose
        untransposed = scaled.transpose()
    else:
        # Wheeeeeeeeeee
        untransposed = transposed.transpose()
    log_state("end scaler", job_context["job"].id, start_scaler)
    # This is just for quality assurance in tests.
    job_context["final_frame"] = untransposed
    # Write to temp file with dataset UUID in filename.
    # NOTE(review): `subdir` is computed below but never used afterwards in
    # this function — looks like dead code; confirm before removing.
    subdir = ""
    if job_context["dataset"].aggregate_by in ["SPECIES", "EXPERIMENT"]:
        subdir = key
    elif job_context["dataset"].aggregate_by == "ALL":
        subdir = "ALL"
    # Normalize the Header format
    untransposed.index.rename("Gene", inplace=True)
    outfile_dir = job_context["output_dir"] + key + "/"
    os.makedirs(outfile_dir, exist_ok=True)
    outfile = outfile_dir + key + ".tsv"
    job_context["smash_outfile"] = outfile
    untransposed.to_csv(outfile, sep="\t", encoding="utf-8")
    log_state("end _smash_key for {}".format(key), job_context["job"].id, start_smash)
    return job_context
def _smash_all(job_context: Dict) -> Dict:
    """Perform smashing on all species/experiments in the dataset.

    Smashes each key in job_context['input_files'] via _smash_key, writes the
    metadata/non-data files, and zips the output directory. On any failure the
    job and dataset are marked unsuccessful but the exception is swallowed so
    the failure notification can still be sent downstream.
    """
    start_smash = log_state("start smash", job_context["job"].id)
    # We have already failed - return now so we can send our fail email.
    if job_context["job"].success is False:
        return job_context
    try:
        job_context["unsmashable_files"] = []
        job_context["num_samples"] = 0
        # Smash all of the sample sets
        logger.debug(
            "About to smash!",
            dataset_count=len(job_context["dataset"].data),
            job_id=job_context["job"].id,
        )
        # Once again, `key` is either a species name or an experiment accession
        # (pop so the potentially-large mapping isn't kept in the context).
        for key, input_files in job_context.pop("input_files").items():
            job_context = _smash_key(job_context, key, input_files)
        smashing_utils.write_non_data_files(job_context)
        # Finally, compress all files into a zip
        final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
        shutil.make_archive(final_zip_base, "zip", job_context["output_dir"])
        job_context["output_file"] = final_zip_base + ".zip"
    except Exception as e:
        logger.exception(
            "Could not smash dataset.",
            dataset_id=job_context["dataset"].id,
            processor_job_id=job_context["job_id"],
            num_input_files=job_context["num_input_files"],
        )
        job_context["dataset"].success = False
        job_context["job"].failure_reason = "Failure reason: " + str(e)
        job_context["dataset"].failure_reason = "Failure reason: " + str(e)
        job_context["dataset"].save()
        # Delay failing this pipeline until the failure notify has been sent
        job_context["job"].success = False
        job_context["failure_reason"] = str(e)
        return job_context
    job_context["dataset"].success = True
    job_context["dataset"].save()
    logger.debug("Created smash output!", archive_location=job_context["output_file"])
    log_state("end smash", job_context["job"].id, start_smash)
    return job_context
def _upload(job_context: Dict) -> Dict:
    """ Uploads the result file to S3 and notifies user.

    Pushes job_context['output_file'] to RESULTS_BUCKET with a public-read
    ACL (cloud only), records the S3 location/size/sha1 on the dataset, and
    deletes the local copy. On failure the job is marked unsuccessful but the
    pipeline is not aborted here so the failure email can still go out.
    """
    # There has been a failure already, don't try to upload anything.
    if not job_context.get("output_file", None):
        logger.error(
            "Was told to upload a smash result without an output_file.",
            job_id=job_context["job"].id,
        )
        return job_context
    try:
        if job_context.get("upload", True) and settings.RUNNING_IN_CLOUD:
            s3_client = boto3.client("s3")
            # Note that file expiry is handled by the S3 object lifecycle,
            # managed by terraform.
            s3_client.upload_file(
                job_context["output_file"],
                RESULTS_BUCKET,
                job_context["output_file"].split("/")[-1],
                ExtraArgs={"ACL": "public-read"},
            )
            result_url = (
                "https://s3.amazonaws.com/"
                + RESULTS_BUCKET
                + "/"
                + job_context["output_file"].split("/")[-1]
            )
            job_context["result_url"] = result_url
            logger.debug("Result uploaded!", result_url=job_context["result_url"])
            job_context["dataset"].s3_bucket = RESULTS_BUCKET
            job_context["dataset"].s3_key = job_context["output_file"].split("/")[-1]
            job_context["dataset"].size_in_bytes = calculate_file_size(job_context["output_file"])
            job_context["dataset"].sha1 = calculate_sha1(job_context["output_file"])
            job_context["dataset"].save()
            # File is uploaded, we can delete the local.
            try:
                os.remove(job_context["output_file"])
            except OSError:
                pass
    except Exception as e:
        logger.exception("Failed to upload smash result file.", file=job_context["output_file"])
        job_context["job"].success = False
        job_context["job"].failure_reason = "Failure reason: " + str(e)
        # Delay failing this pipeline until the failure notify has been sent
        # job_context['success'] = False
    return job_context
def _notify(job_context: Dict) -> Dict:
    """ Use AWS SES to notify a user of a smash result..

    Also posts a Slack message on failure. Skipped entirely when uploading is
    disabled or when not running in the cloud. Raises ProcessorJobError if
    the email itself cannot be sent.
    """
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context
    # Send a notification to slack when a dataset fails to be processed
    if job_context["job"].success is False:
        try:
            _notify_slack_failed_dataset(job_context)
        except Exception as e:
            logger.warn(e)  # It doesn't really matter if this didn't work
    # Don't send an email if we don't have address.
    if job_context["dataset"].email_address:
        # Try to send the email.
        try:
            _notify_send_email(job_context)
        # Display an error if something goes wrong.
        except ClientError as e:
            raise utils.ProcessorJobError(
                "ClientError while notifying",
                success=False,
                exc_info=1,
                client_error_message=e.response["Error"]["Message"],
            )
        except Exception as e:
            raise utils.ProcessorJobError(
                "General failure when trying to send email.",
                success=False,
                exc_info=1,
                result_url=job_context["result_url"],
            )
    # We don't want to retry this dataset after we send a notification to users
    # https://github.com/alexslemonade/refinebio/issues/1944
    job_context["job"].no_retry = True
    job_context["job"].save()
    return job_context
def _notify_slack_failed_dataset(job_context: Dict):
    """ Send a slack notification when a dataset fails to smash.

    Posts to the webhook configured in settings.ENGAGEMENTBOT_WEBHOOK with the
    job's failure reason, the dataset id, and the requester's email address.
    """
    # Link to the dataset page, where the user can re-try the download job
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)
    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json={
            "channel": "ccdl-general",  # Move to robots when we get sick of these
            "username": "EngagementBot",
            "icon_emoji": ":halal:",
            "attachments": [
                {
                    "fallback": "Dataset failed processing.",
                    "title": "Dataset failed processing",
                    "title_link": dataset_url,
                    "color": "#db3b28",
                    "text": job_context["job"].failure_reason,
                    "fields": [
                        {
                            "title": "Dataset id",
                            "value": str(job_context["dataset"].id),
                            "short": True,
                        },
                        {
                            "title": "Email",
                            "value": job_context["dataset"].email_address,
                            "short": True,
                        },
                    ],
                    "footer": "Refine.bio",
                    "footer_icon": "https://s3.amazonaws.com/refinebio-email/logo-2x.png",
                }
            ],
        },
        headers={"Content-Type": "application/json"},
        # Don't let a slow webhook hang the processor job.
        timeout=10,
    )
def _notify_send_email(job_context):
    """ Send email notification to the user if the dataset succeded or failed.

    Builds either the success or the failure email (plain-text plus HTML from
    the preloaded templates) and sends it via AWS SES. SES ClientError
    propagates to the caller (_notify) which wraps it.
    """
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)
    SENDER = "Refine.bio Mail Robot <noreply@refine.bio>"
    RECIPIENT = job_context["dataset"].email_address
    AWS_REGION = "us-east-1"
    CHARSET = "UTF-8"
    if job_context["job"].success is False:
        # Failure path: apologetic email with the failure reason and
        # prefilled links for reporting the problem.
        SUBJECT = "There was a problem processing your refine.bio dataset :("
        BODY_TEXT = (
            "We tried but were unable to process your requested dataset. Error was: \n\n"
            + str(job_context["job"].failure_reason)
            + "\nDataset ID: "
            + str(job_context["dataset"].id)
            + "\n We have been notified and are looking into the problem. \n\nSorry!"
        )
        ERROR_EMAIL_TITLE = quote("I can't download my dataset")
        ERROR_EMAIL_BODY = quote(
            """
        [What browser are you using?]
        [Add details of the issue you are facing]
        ---
        """
            + str(job_context["dataset"].id)
        )
        FORMATTED_HTML = (
            BODY_ERROR_HTML.replace("REPLACE_DATASET_URL", dataset_url)
            .replace("REPLACE_ERROR_TEXT", job_context["job"].failure_reason)
            .replace(
                "REPLACE_NEW_ISSUE",
                "https://github.com/AlexsLemonade/refinebio/issues/new?title={0}&body={1}&labels=bug".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
            .replace(
                "REPLACE_MAILTO",
                "mailto:ccdl@alexslemonade.org?subject={0}&body={1}".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
        )
        job_context["success"] = False
    else:
        SUBJECT = "Your refine.bio Dataset is Ready!"
        BODY_TEXT = "Hot off the presses:\n\n" + dataset_url + "\n\nLove!,\nThe refine.bio Team"
        FORMATTED_HTML = BODY_HTML.replace("REPLACE_DOWNLOAD_URL", dataset_url).replace(
            "REPLACE_DATASET_URL", dataset_url
        )
    # Create a new SES resource and specify a region.
    client = boto3.client("ses", region_name=AWS_REGION)
    # Provide the contents of the email. (The SES response is not inspected.)
    response = client.send_email(
        Destination={"ToAddresses": [RECIPIENT,],},
        Message={
            "Body": {
                "Html": {"Charset": CHARSET, "Data": FORMATTED_HTML,},
                "Text": {"Charset": CHARSET, "Data": BODY_TEXT,},
            },
            "Subject": {"Charset": CHARSET, "Data": SUBJECT,},
        },
        Source=SENDER,
    )
def _update_result_objects(job_context: Dict) -> Dict:
    """Mark the dataset as fully processed and available for download."""
    finished_dataset = job_context["dataset"]
    finished_dataset.is_processing = False
    finished_dataset.is_processed = True
    finished_dataset.is_available = True
    # Download links remain valid for one week after processing completes.
    finished_dataset.expires_on = timezone.now() + timedelta(days=7)
    finished_dataset.save()
    job_context["success"] = True
    return job_context
def smash(job_id: int, upload=True) -> Dict:
    """ Main Smasher interface.

    Runs the smasher pipeline for the given processor job id and returns the
    final job_context. `upload=False` skips S3 upload and notifications
    (useful for tests/local runs).
    """
    pipeline = Pipeline(name=PipelineEnum.SMASHER.value)
    job_context = utils.run_pipeline(
        {"job_id": job_id, "upload": upload, "pipeline": pipeline},
        [
            utils.start_job,
            smashing_utils.prepare_files,
            _smash_all,
            _upload,
            _update_result_objects,
            utils.end_job,
        ],
    )
    # ensure that `notify` is always called so that users get emails in case processing fails or succeeds
    job_context = _notify(job_context)
    return job_context
<|code_end|>
workers/data_refinery_workers/processors/smashing_utils.py
<|code_start|># -*- coding: utf-8 -*-
import csv
import logging
import math
import os
import multiprocessing
import shutil
import time
from pathlib import Path
from typing import Dict, List, Tuple
from concurrent.futures import ThreadPoolExecutor
from django.utils import timezone
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
import numpy as np
import pandas as pd
import psutil
import rpy2.robjects as ro
import simplejson as json
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputedFile, Sample
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
# Leave roughly half the cores free for the rest of the system.
MULTIPROCESSING_MAX_THREAD_COUNT = max(1, math.floor(multiprocessing.cpu_count() / 2) - 1)
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
# Email templates are read once at import time with newlines stripped.
BODY_HTML = (
    Path("data_refinery_workers/processors/smasher_email.min.html").read_text().replace("\n", "")
)
BODY_ERROR_HTML = (
    Path("data_refinery_workers/processors/smasher_email_error.min.html")
    .read_text()
    .replace("\n", "")
)
BYTES_IN_GB = 1024 * 1024 * 1024
# Column-chunk width used when quantile normalizing very wide matrices.
QN_CHUNK_SIZE = 10000
logger = get_and_configure_logger(__name__)
### DEBUG ###
# NOTE(review): DEBUG level is forced unconditionally here — presumably
# temporary instrumentation; confirm this is intended before shipping.
logger.setLevel(logging.getLevelName("DEBUG"))
def log_state(message, job_id, start_time=False):
    """Emit a DEBUG checkpoint with CPU and RAM usage for a job.

    When `start_time` is falsy, returns a timestamp meant to be passed back
    into a later call; when it is a timestamp, logs the elapsed duration
    instead. Does nothing (returns None) unless DEBUG logging is enabled.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return None
    current_process = psutil.Process(os.getpid())
    resident_gb = current_process.memory_info().rss / BYTES_IN_GB
    logger.debug(message, total_cpu=psutil.cpu_percent(), process_ram=resident_gb, job_id=job_id)
    if start_time:
        logger.debug("Duration: %s" % (time.time() - start_time), job_id=job_id)
        return None
    return time.time()
def prepare_files(job_context: Dict) -> Dict:
    """
    Fetches and prepares the files to smash.

    Populates job_context['input_files'] with {key: [(ComputedFile, Sample)]}
    pairs, records filtered samples with a human-readable reason, and creates
    a fresh work/output directory tree. Raises ProcessorJobError when no
    sample has a smashable file at all.
    """
    start_prepare_files = log_state("start prepare files", job_context["job"].id)
    found_files = False
    job_context["filtered_samples"] = {}
    job_context["input_files"] = {}
    # `key` can either be the species name or experiment accession.
    for key, samples in job_context["samples"].items():
        smashable_files = []
        seen_files = set()
        for sample in samples:
            smashable_file = sample.get_most_recent_smashable_result_file()
            if smashable_file is not None and smashable_file not in seen_files:
                smashable_files = smashable_files + [(smashable_file, sample)]
                seen_files.add(smashable_file)
                found_files = True
            else:
                # NOTE(review): samples whose file was already seen (a
                # duplicate) also land here and get the "no processed file"
                # reason below — confirm that wording is intended for dupes.
                sample_metadata = sample.to_metadata_dict()
                job_context["filtered_samples"][sample.accession_code] = {
                    **sample_metadata,
                    "reason": "This sample did not have a processed file associated with it in our database.",
                    "experiment_accession_code": get_experiment_accession(
                        sample.accession_code, job_context["dataset"].data
                    ),
                }
        job_context["input_files"][key] = smashable_files
    job_context["num_input_files"] = len(job_context["input_files"])
    job_context["group_by_keys"] = list(job_context["input_files"].keys())
    if not found_files:
        raise utils.ProcessorJobError(
            "Couldn't get any files to smash for Smash job!!",
            success=False,
            dataset_id=job_context["dataset"].id,
            num_samples=len(job_context["samples"]),
        )
    dataset_id = str(job_context["dataset"].pk)
    job_context["work_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    # Ensure we have a fresh smash directory
    shutil.rmtree(job_context["work_dir"], ignore_errors=True)
    os.makedirs(job_context["work_dir"])
    job_context["output_dir"] = job_context["work_dir"] + "output/"
    os.makedirs(job_context["output_dir"])
    log_state("end prepare files", job_context["job"].id, start_prepare_files)
    return job_context
def _load_and_sanitize_file(computed_file_path) -> pd.DataFrame:
    """ Read and sanitize a computed file.

    Reads a gene-by-value TSV (first column = gene id index), strips probe
    suffixes and gene version suffixes, drops Affymetrix control probes, and
    averages duplicated gene rows.
    """
    # NOTE(review): `error_bad_lines` was removed in pandas 2.0
    # (replaced by `on_bad_lines`); this pins the module to pandas < 2.
    data = pd.read_csv(
        computed_file_path,
        sep="\t",
        header=0,
        index_col=0,
        dtype={0: str, 1: np.float32},
        error_bad_lines=False,
    )
    # Strip any funky whitespace
    data.columns = data.columns.str.strip()
    data = data.dropna(axis="columns", how="all")
    # Make sure the index type is correct
    data.index = data.index.map(str)
    # Ensure that we don't have any dangling Brainarray-generated probe symbols.
    # BA likes to leave '_at', signifying probe identifiers,
    # on their converted, non-probe identifiers. It makes no sense.
    # So, we chop them off and don't worry about it.
    # NOTE(review): str.replace substitutes every occurrence, so an interior
    # "_at" would also be removed, not only a trailing one — confirm intended.
    data.index = data.index.str.replace("_at", "")
    # Remove any lingering Affymetrix control probes ("AFFX-")
    data = data[~data.index.str.contains("AFFX-")]
    # If there are any _versioned_ gene identifiers, remove that
    # version information. We're using the latest brainarray for everything anyway.
    # Jackie says this is okay.
    # She also says that in the future, we may only want to do this
    # for cross-technology smashes.
    # This regex needs to be able to handle EGIDs in the form:
    # ENSGXXXXYYYZZZZ.6
    # and
    # fgenesh2_kg.7__3016__AT5G35080.1 (via http://plants.ensembl.org/Arabidopsis_lyrata/ \
    # Gene/Summary?g=fgenesh2_kg.7__3016__AT5G35080.1;r=7:17949732-17952000;t=fgenesh2_kg. \
    # 7__3016__AT5G35080.1;db=core)
    data.index = data.index.str.replace(r"(\.[^.]*)$", "")
    # Squish duplicated rows together.
    # XXX/TODO: Is mean the appropriate method here?
    # We can make this an option in future.
    # Discussion here: https://github.com/AlexsLemonade/refinebio/issues/186#issuecomment-395516419
    data = data.groupby(data.index, sort=False).mean()
    return data
def process_frame(work_dir, computed_file, sample_accession_code, aggregate_by) -> pd.DataFrame:
    """ Downloads the computed file from S3 and tries to see if it's smashable.
    Returns a data frame if the file can be processed or None otherwise. """
    # Initialized up front so the outer exception handler can always reference
    # it; previously a failure inside get_synced_file_path itself would raise
    # an UnboundLocalError while logging "Unable to smash file".
    computed_file_path = None
    try:
        # Download the file to a job-specific location so it
        # won't disappear while we're using it.
        computed_file_path = computed_file.get_synced_file_path(
            path="%s%s" % (work_dir, computed_file.filename)
        )
        # Bail appropriately if this isn't a real file.
        if not computed_file_path or not os.path.exists(computed_file_path):
            logger.warning(
                "Smasher received non-existent file path.",
                computed_file_path=computed_file_path,
                computed_file_id=computed_file.id,
            )
            return None
        data = _load_and_sanitize_file(computed_file_path)
        if len(data.columns) > 2:
            # Most of the time, >1 is actually bad, but we also need to support
            # two-channel samples. I think ultimately those should be given some kind of
            # special consideration.
            logger.info(
                "Found a frame with more than 2 columns - this shouldn't happen!",
                computed_file_path=computed_file_path,
                computed_file_id=computed_file.id,
            )
            return None
        # via https://github.com/AlexsLemonade/refinebio/issues/330:
        # aggregating by experiment -> return untransformed output from tximport
        # aggregating by species -> log2(x + 1) tximport output
        # NOTE(review): this applies log2 when has_been_log2scaled() is True,
        # which reads like a double transform — verify the predicate.
        if aggregate_by == "SPECIES" and computed_file.has_been_log2scaled():
            data = data + 1
            data = np.log2(data)
        # Ideally done in the NO-OPPER, but sanity check here.
        if (not computed_file.has_been_log2scaled()) and (data.max() > 100).any():
            logger.info("Detected non-log2 microarray data.", computed_file_id=computed_file.id)
            data = np.log2(data)
        # Explicitly title this dataframe
        try:
            data.columns = [sample_accession_code]
        except ValueError as e:
            # This sample might have multiple channels, or something else.
            # Don't mess with it.
            logger.warn(
                "Smasher found multi-channel column (probably) - skipping!",
                exc_info=1,
                computed_file_path=computed_file_path,
            )
            return None
        except Exception as e:
            # Okay, somebody probably forgot to create a SampleComputedFileAssociation
            # Don't mess with it.
            logger.warn(
                "Smasher found very bad column title - skipping!",
                exc_info=1,
                computed_file_path=computed_file_path,
            )
            return None
    except Exception as e:
        logger.exception("Unable to smash file", file=computed_file_path)
        return None
    # TEMPORARY for iterating on compendia more quickly.
    # finally:
    #     # Delete before archiving the work dir
    #     if computed_file_path and os.path.exists(computed_file_path):
    #         os.remove(computed_file_path)
    return data
def load_first_pass_data_if_cached(work_dir: str):
    """Load cached first-pass results from `work_dir`, if they exist.

    Returns a dict with keys 'gene_ids', 'microarray_columns', and
    'rnaseq_columns' (each a list of strings), or None when no cache file is
    present. A corrupt cache raises; callers handle that higher in the stack.
    """
    cache_path = os.path.join(work_dir, "first_pass.csv")
    try:
        with open(cache_path, newline="") as cache_file:
            rows = csv.reader(cache_file)
            # The cache is exactly three rows, written by cache_first_pass,
            # read in the same order they were written.
            keys = ("gene_ids", "microarray_columns", "rnaseq_columns")
            return dict(zip(keys, (next(rows), next(rows), next(rows))))
    # A missing file just means nothing was cached yet.
    except FileNotFoundError:
        return None
def cache_first_pass(
    job_context: Dict, gene_ids: List[str], microarray_columns: List[str], rnaseq_columns: List[str]
):
    """Persist first-pass results to `<work_dir>/first_pass.csv`.

    Writes three rows (gene ids, microarray columns, RNA-seq columns) in the
    order load_first_pass_data_if_cached reads them back. Best-effort: any
    failure is logged and swallowed so the first pass isn't wasted.
    """
    try:
        path = os.path.join(job_context["work_dir"], "first_pass.csv")
        logger.info(
            "Caching gene_ids, microarray_columns, and rnaseq_columns to %s",
            path,
            job_id=job_context["job"].id,
        )
        with open(path, "w", newline="") as csvfile:
            csv.writer(csvfile).writerows([gene_ids, microarray_columns, rnaseq_columns])
    except Exception:
        logger.exception(
            "Error writing gene identifiers to CSV file.", job_id=job_context["job"].id
        )
def process_frames_for_key(
    key: str, input_files: List[Tuple[ComputedFile, Sample]], job_context: Dict
) -> Dict:
    """Download, read, and chunk processed sample files from s3.
    `key` is the species or experiment whose samples are contained in `input_files`.
    Will add to job_context the keys 'microarray_matrix' and
    'rnaseq_matrix' with pandas dataframes containing all of the
    samples' data. Also appends to job_context['unsmashable_files'] the
    filenames that were determined to be unsmashable.

    Two passes over input_files: the first collects gene identifiers and
    column names (possibly loaded from a cache), the second fills the
    preallocated matrices.
    """
    start_gene_ids = log_state(
        "Collecting all gene identifiers for key {}".format(key), job_context["job"].id
    )
    # Build up a list of gene identifiers because these will be the
    # rows of our matrices, and we want to preallocate them so we need
    # to know them all.
    ## We may have built this list in a previous job, check to see if it's cached:
    cached_data = load_first_pass_data_if_cached(job_context["work_dir"])
    first_pass_was_cached = False
    if cached_data:
        logger.info(
            (
                "The data from the first pass was cached, so we're using "
                "that and skipping the first pass."
            ),
            job_id=job_context["job"].id,
        )
        first_pass_was_cached = True
        all_gene_identifiers = cached_data["gene_ids"]
        microarray_columns = cached_data["microarray_columns"]
        rnaseq_columns = cached_data["rnaseq_columns"]
    else:
        gene_identifier_counts = {}
        microarray_columns = []
        rnaseq_columns = []
        for index, (computed_file, sample) in enumerate(input_files):
            log_state("1st processing frame {}".format(index), job_context["job"].id)
            frame_data = process_frame(
                job_context["work_dir"],
                computed_file,
                sample.accession_code,
                job_context["dataset"].aggregate_by,
            )
            if frame_data is None:
                # we were unable to process this sample, so we drop
                logger.warning(
                    "Unable to smash file",
                    computed_file=computed_file.id,
                    dataset_id=job_context["dataset"].id,
                    job_id=job_context["job"].id,
                )
                sample_metadata = sample.to_metadata_dict()
                job_context["filtered_samples"][sample.accession_code] = {
                    **sample_metadata,
                    "reason": "The file associated with this sample did not pass the QC checks we apply before aggregating.",
                    "filename": computed_file.filename,
                    "experiment_accession_code": get_experiment_accession(
                        sample.accession_code, job_context["dataset"].data
                    ),
                }
                continue
            # Count how many frames are in each tech so we can preallocate
            # the matrices in both directions.
            for gene_id in frame_data.index:
                if gene_id in gene_identifier_counts:
                    gene_identifier_counts[gene_id] += 1
                else:
                    gene_identifier_counts[gene_id] = 1
            # Each dataframe should only have 1 column, but it's
            # returned as a list so use extend.
            if sample.technology == "MICROARRAY":
                microarray_columns.extend(frame_data.columns)
            elif sample.technology == "RNA-SEQ":
                rnaseq_columns.extend(frame_data.columns)
        # We only want to use gene identifiers which are present
        # in >50% of the samples. We're doing this because a large
        # number of gene identifiers present in only a modest
        # number of experiments have leaked through. We wouldn't
        # necessarily want to do this if we'd mapped all the data
        # to ENSEMBL identifiers successfully.
        total_samples = len(microarray_columns) + len(rnaseq_columns)
        all_gene_identifiers = [
            gene_id
            for gene_id in gene_identifier_counts
            if gene_identifier_counts[gene_id] > (total_samples * 0.5)
        ]
        all_gene_identifiers.sort()
        # Free the counts dict; it can be large for a full compendium.
        del gene_identifier_counts
        log_template = (
            "Collected {0} gene identifiers for {1} across"
            " {2} micrarry samples and {3} RNA-Seq samples."
        )
        log_state(
            log_template.format(
                len(all_gene_identifiers), key, len(microarray_columns), len(rnaseq_columns)
            ),
            job_context["job"].id,
            start_gene_ids,
        )
    # Temporarily only cache mouse compendia because it may not succeed.
    if not first_pass_was_cached and key == "MUS_MUSCULUS":
        cache_first_pass(job_context, all_gene_identifiers, microarray_columns, rnaseq_columns)
    start_build_matrix = log_state("Beginning to build the full matrices.", job_context["job"].id)
    # Sort the columns so that the matrices are in predictable orders.
    microarray_columns.sort()
    rnaseq_columns.sort()
    # Preallocate the matrices to be the exact size we will need. This
    # should prevent any operations from happening while we build it
    # up, so the only RAM used will be needed.
    job_context["microarray_matrix"] = pd.DataFrame(
        data=None, index=all_gene_identifiers, columns=microarray_columns, dtype=np.float32
    )
    job_context["rnaseq_matrix"] = pd.DataFrame(
        data=None, index=all_gene_identifiers, columns=rnaseq_columns, dtype=np.float32
    )
    # Second pass: re-read each frame and slot its single column into the
    # matching preallocated matrix, restricted to the chosen gene ids.
    for index, (computed_file, sample) in enumerate(input_files):
        log_state("2nd processing frame {}".format(index), job_context["job"].id)
        frame_data = process_frame(
            job_context["work_dir"],
            computed_file,
            sample.accession_code,
            job_context["dataset"].aggregate_by,
        )
        if frame_data is None:
            job_context["unsmashable_files"].append(computed_file.filename)
            sample_metadata = sample.to_metadata_dict()
            job_context["filtered_samples"][sample.accession_code] = {
                **sample_metadata,
                "reason": "The file associated with this sample did not contain a vector that fit the expected dimensions of the matrix.",
                "filename": computed_file.filename,
                "experiment_accession_code": get_experiment_accession(
                    sample.accession_code, job_context["dataset"].data
                ),
            }
            continue
        frame_data = frame_data.reindex(all_gene_identifiers)
        # The dataframe for each sample will only have one column
        # whose header will be the accession code.
        column = frame_data.columns[0]
        if sample.technology == "MICROARRAY":
            job_context["microarray_matrix"][column] = frame_data.values
        elif sample.technology == "RNA-SEQ":
            job_context["rnaseq_matrix"][column] = frame_data.values
    job_context["num_samples"] = 0
    if job_context["microarray_matrix"] is not None:
        job_context["num_samples"] += len(job_context["microarray_matrix"].columns)
    if job_context["rnaseq_matrix"] is not None:
        job_context["num_samples"] += len(job_context["rnaseq_matrix"].columns)
    log_state(
        "Built full matrices for key {}".format(key), job_context["job"].id, start_build_matrix
    )
    return job_context
# Modified from: http://yaoyao.codes/pandas/2018/01/23/pandas-split-a-dataframe-into-chunks
def _index_marks(num_columns, chunk_size):
return range(chunk_size, math.ceil(num_columns / chunk_size) * chunk_size, chunk_size)
def _split_dataframe_columns(dataframe, chunk_size):
indices = _index_marks(dataframe.shape[1], chunk_size)
return np.split(dataframe, indices, axis=1)
def _quantile_normalize_matrix(target_vector, original_matrix):
    """Quantile normalize `original_matrix` against `target_vector` via R's
    preprocessCore, returning a pandas DataFrame with the original index and
    columns.

    Matrices wider than QN_CHUNK_SIZE columns are normalized chunk-by-chunk;
    note that the chunked path writes results back into `original_matrix`
    in place and returns it.
    """
    preprocessCore = importr("preprocessCore")
    as_numeric = rlang("as.numeric")
    data_matrix = rlang("data.matrix")
    # Convert the smashed frames to an R numeric Matrix
    target_vector = as_numeric(target_vector)
    # Do so in chunks if the matrix is too large.
    if original_matrix.shape[1] <= QN_CHUNK_SIZE:
        merged_matrix = data_matrix(original_matrix)
        normalized_matrix = preprocessCore.normalize_quantiles_use_target(
            x=merged_matrix, target=target_vector, copy=True
        )
        # And finally convert back to Pandas
        ar = np.array(normalized_matrix)
        new_merged = pd.DataFrame(ar, columns=original_matrix.columns, index=original_matrix.index)
    else:
        matrix_chunks = _split_dataframe_columns(original_matrix, QN_CHUNK_SIZE)
        for i, chunk in enumerate(matrix_chunks):
            R_chunk = data_matrix(chunk)
            normalized_chunk = preprocessCore.normalize_quantiles_use_target(
                x=R_chunk, target=target_vector, copy=True
            )
            ar = np.array(normalized_chunk)
            # Write the normalized chunk back over the source columns.
            start_column = i * QN_CHUNK_SIZE
            end_column = (i + 1) * QN_CHUNK_SIZE
            original_matrix.iloc[:, start_column:end_column] = ar
        new_merged = original_matrix
    return new_merged
def _test_qn(merged_matrix):
    """ Selects a list of 100 random pairs of columns and performs the KS Test on them.
    Returns a list of tuples with the results of the KN test (statistic, pvalue),
    or None when the matrix has fewer than two columns. """
    # Verify this QN, related:
    # https://github.com/AlexsLemonade/refinebio/issues/599#issuecomment-422132009
    # R bridge handles (rpy2).
    data_matrix = rlang("data.matrix")
    as_numeric = rlang("as.numeric")
    set_seed = rlang("set.seed")
    combn = rlang("combn")
    ncol = rlang("ncol")
    ks_test = rlang("ks.test")
    which = rlang("which")
    merged_R_matrix = data_matrix(merged_matrix)
    # Fixed seed so the sampled column pairs are reproducible across runs.
    set_seed(123)
    n = ncol(merged_R_matrix)[0]
    m = 2
    # Not enough columns to perform KS test - either bad smash or single sample smash.
    if n < m:
        return None
    # This wont work with larger matricies
    # https://github.com/AlexsLemonade/refinebio/issues/1860
    ncolumns = ncol(merged_R_matrix)
    if ncolumns[0] <= 200:
        # Convert to NP, Shuffle, Return to R
        # (small matrix: enumerate every column pair, then shuffle pair order)
        combos = combn(ncolumns, 2)
        ar = np.array(combos)
        np.random.shuffle(np.transpose(ar))
    else:
        # Large matrix: building all pairs is too expensive, so shuffle the
        # column indices and pair the first 100 with the next 100.
        indexes = [*range(ncolumns[0])]
        np.random.shuffle(indexes)
        ar = np.array([*zip(indexes[0:100], indexes[100:200])])
    # Hand the (2 x n_pairs) array of column-index pairs back to R.
    nr, nc = ar.shape
    combos = ro.r.matrix(ar, nrow=nr, ncol=nc)
    result = []
    # adapted from
    # https://stackoverflow.com/questions/9661469/r-t-test-over-all-columns
    # apply KS test to randomly selected pairs of columns (samples)
    for i in range(1, min(ncol(combos)[0], 100)):
        value1 = combos.rx(1, i)[0]
        value2 = combos.rx(2, i)[0]
        test_a = merged_R_matrix.rx(True, value1)
        test_b = merged_R_matrix.rx(True, value2)
        # RNA-seq has a lot of zeroes in it, which
        # breaks the ks_test. Therefore we want to
        # filter them out. To do this we drop the
        # lowest half of the values. If there's
        # still zeroes in there, then that's
        # probably too many zeroes so it's okay to
        # fail.
        median_a = np.median(test_a)
        median_b = np.median(test_b)
        # `which` returns indices which are
        # 1-indexed. Python accesses lists with
        # zero-indexes, even if that list is
        # actually an R vector. Therefore subtract
        # 1 to account for the difference.
        test_a = [test_a[i - 1] for i in which(test_a > median_a)]
        test_b = [test_b[i - 1] for i in which(test_b > median_b)]
        # The python list comprehension gives us a
        # python list, but ks_test wants an R
        # vector so let's go back.
        test_a = as_numeric(test_a)
        test_b = as_numeric(test_b)
        ks_res = ks_test(test_a, test_b)
        statistic = ks_res.rx("statistic")[0][0]
        pvalue = ks_res.rx("p.value")[0][0]
        result.append((statistic, pvalue))
    return result
def quantile_normalize(job_context: Dict, ks_check=True, ks_stat=0.001) -> Dict:
    """
    Apply quantile normalization.

    Reads "organism" and "merged_no_qn" from job_context ("merged_no_qn" is
    popped, since it is no longer needed afterwards) and adds "merged_qn".
    When the KS verification runs it also sets "ks_statistic"/"ks_pvalue"
    and possibly "ks_warning".

    Raises utils.ProcessorJobError (after marking the dataset as failed)
    when the organism has no QN target.
    """
    # Prepare our QN target file
    organism = job_context["organism"]
    if not organism.qn_target:
        # Record the failure on the dataset before raising so the API can
        # surface the reason to the user.
        failure_reason = "Could not find QN target for Organism: " + str(organism)
        job_context["dataset"].success = False
        job_context["dataset"].failure_reason = failure_reason
        job_context["dataset"].save()
        raise utils.ProcessorJobError(
            failure_reason, success=False, organism=organism, dataset_id=job_context["dataset"].id
        )
    qn_target_path = organism.qn_target.computedfile_set.latest().sync_from_s3()
    qn_target_frame = pd.read_csv(
        qn_target_path, sep="\t", header=None, index_col=None, error_bad_lines=False
    )
    # Prepare our RPy2 bridge
    pandas2ri.activate()
    # Remove un-quantiled normalized matrix from job_context
    # because we no longer need it.
    merged_no_qn = job_context.pop("merged_no_qn")
    # Perform the Actual QN
    new_merged = _quantile_normalize_matrix(qn_target_frame[0], merged_no_qn)
    # And add the quantile normalized matrix to job_context.
    job_context["merged_qn"] = new_merged
    # For now, don't test the QN for mouse/human. This never fails on
    # smasher jobs and is OOM-killing our very large compendia
    # jobs. Let's run this manually after we have a compendia job
    # actually finish.
    if organism.name in ["MUS_MUSCULUS", "HOMO_SAPIENS"]:
        return job_context
    ks_res = _test_qn(new_merged)
    if ks_res:
        for (statistic, pvalue) in ks_res:
            job_context["ks_statistic"] = statistic
            job_context["ks_pvalue"] = pvalue
            # We're unsure of how strigent to be about
            # the pvalue just yet, so we're extra lax
            # rather than failing tons of tests. This may need tuning.
            if ks_check and (statistic > ks_stat or pvalue < 0.8):
                job_context["ks_warning"] = (
                    "Failed Kolmogorov Smirnov test! Stat: "
                    + str(statistic)
                    + ", PVal: "
                    + str(pvalue)
                )
    else:
        # _test_qn returns None when there were fewer than two columns.
        logger.warning(
            "Not enough columns to perform KS test - either bad smash or single sample smash.",
            dataset_id=job_context["dataset"].id,
        )
    return job_context
def compile_metadata(job_context: Dict) -> Dict:
    """Compiles metadata about the job.

    Returns a new dict containing the metadata, not the job_context.
    """
    dataset = job_context["dataset"]
    filtered_samples = job_context["filtered_samples"]

    metadata = {
        "num_samples": job_context["num_samples"],
        "num_experiments": job_context["experiments"].count(),
        "quant_sf_only": dataset.quant_sf_only,
    }

    if not dataset.quant_sf_only:
        metadata["aggregate_by"] = dataset.aggregate_by
        metadata["scale_by"] = dataset.scale_by
        # https://github.com/AlexsLemonade/refinebio/pull/421#discussion_r203799646
        # TODO: do something with these.
        # metadata['non_aggregated_files'] = job_context["unsmashable_files"]
        metadata["ks_statistic"] = job_context.get("ks_statistic", None)
        metadata["ks_pvalue"] = job_context.get("ks_pvalue", None)
        metadata["ks_warning"] = job_context.get("ks_warning", None)
        metadata["quantile_normalized"] = dataset.quantile_normalize

    # Per-sample metadata, skipping any sample that was filtered out.
    metadata["samples"] = {
        sample.accession_code: sample.to_metadata_dict()
        for sample in dataset.get_samples()
        if sample.accession_code not in filtered_samples
    }

    # Per-experiment metadata, with filtered samples removed from each
    # experiment's sample list as well.
    experiments = {}
    for experiment in dataset.get_experiments():
        experiment_metadata = experiment.to_metadata_dict()
        experiment_metadata["sample_accession_codes"] = [
            code
            for code in experiment_metadata["sample_accession_codes"]
            if code not in filtered_samples
        ]
        experiments[experiment.accession_code] = experiment_metadata
    metadata["experiments"] = experiments

    return metadata
def write_non_data_files(job_context: Dict) -> Dict:
    """Writes the files that are not the actual data of the dataset.

    This include LICENSE.txt and README.md files and the metadata.

    Adds the key `metadata` to job_context and populates it with all
    the metadata that needs to be written.
    """
    job_context["metadata"] = compile_metadata(job_context)

    shutil.copy("README_DATASET.md", job_context["output_dir"] + "README.md")
    shutil.copy("LICENSE_DATASET.txt", job_context["output_dir"] + "LICENSE.TXT")

    # Write samples metadata to TSV
    try:
        write_tsv_json(job_context)
        # Metadata to JSON
        job_context["metadata"]["created_at"] = timezone.now().strftime("%Y-%m-%dT%H:%M:%S")
        aggregated_metadata_path = os.path.join(
            job_context["output_dir"], "aggregated_metadata.json"
        )
        with open(aggregated_metadata_path, "w", encoding="utf-8") as metadata_file:
            json.dump(job_context["metadata"], metadata_file, indent=4, sort_keys=True)

        if job_context["filtered_samples"]:
            # generate filtered samples file only if some samples were skipped
            filtered_samples_path = os.path.join(
                job_context["output_dir"], "filtered_samples_metadata.json"
            )
            with open(filtered_samples_path, "w", encoding="utf-8") as metadata_file:
                json.dump(job_context["filtered_samples"], metadata_file, indent=4, sort_keys=True)

            columns = get_tsv_columns(job_context["filtered_samples"])
            filtered_samples_tsv_path = os.path.join(
                job_context["output_dir"], "filtered_samples_metadata.tsv"
            )
            with open(filtered_samples_tsv_path, "w", encoding="utf-8") as tsv_file:
                dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
                dw.writeheader()
                for sample_metadata in job_context["filtered_samples"].values():
                    dw.writerow(get_tsv_row_data(sample_metadata, job_context["dataset"].data))
    except Exception:
        # Metadata writing is deliberately best-effort: log the full
        # traceback but let the job keep the data files it already has.
        # (The exception no longer binds an unused name.)
        logger.exception("Failed to write metadata TSV!", job_id=job_context["job"].id)

    return job_context
def get_experiment_accession(sample_accession_code, dataset_data):
    """Return the accession of the experiment in *dataset_data* that contains the sample."""
    for experiment_accession, sample_codes in dataset_data.items():
        if sample_accession_code in sample_codes:
            return experiment_accession
    # Should never happen, because the sample is by definition in the dataset.
    return ""
def _add_annotation_column(annotation_columns, column_name):
"""Add annotation column names in place.
Any column_name that starts with "refinebio_" will be skipped.
"""
if not column_name.startswith("refinebio_"):
annotation_columns.add(column_name)
def _add_annotation_value(row_data, col_name, col_value, sample_accession_code):
"""Adds a new `col_name` key whose value is `col_value` to row_data.
If col_name already exists in row_data with different value, print
out a warning message.
"""
# Generate a warning message if annotation field name starts with
# "refinebio_". This should rarely (if ever) happen.
if col_name.startswith("refinebio_"):
logger.warning(
"Annotation value skipped",
annotation_field=col_name,
annotation_value=col_value,
sample_accession_code=sample_accession_code,
)
elif col_name not in row_data:
row_data[col_name] = col_value
# Generate a warning message in case of conflicts of annotation values.
# (Requested by Dr. Jackie Taroni)
elif row_data[col_name] != col_value:
logger.warning(
"Conflict of values found in column %s: %s vs. %s"
% (col_name, row_data[col_name], col_value),
sample_accession_code=sample_accession_code,
)
def get_tsv_row_data(sample_metadata, dataset_data):
    """Returns field values based on input sample_metadata.

    Some annotation fields are treated specially because they are more
    important. See `get_tsv_columns` function above for details.

    `sample_metadata` is one sample's metadata dict (as produced by
    `Sample.to_metadata_dict`); `dataset_data` maps experiment accessions
    to lists of sample accessions and is only used to fill in the
    "experiment_accession" column.
    """
    sample_accession_code = sample_metadata.get("refinebio_accession_code", "")
    row_data = dict()
    for meta_key, meta_value in sample_metadata.items():
        # If the field is a refinebio-specific field, simply copy it.
        if meta_key != "refinebio_annotations":
            row_data[meta_key] = meta_value
            continue

        # Decompose sample_metadata["refinebio_annotations"], which is
        # an array of annotations.
        for annotation in meta_value:
            for annotation_key, annotation_value in annotation.items():
                # "characteristic" in ArrayExpress annotation
                if (
                    sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                    and annotation_key == "characteristic"
                ):
                    for pair_dict in annotation_value:
                        if "category" in pair_dict and "value" in pair_dict:
                            col_name, col_value = pair_dict["category"], pair_dict["value"]
                            _add_annotation_value(
                                row_data, col_name, col_value, sample_accession_code
                            )
                # "variable" in ArrayExpress annotation
                elif (
                    sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                    and annotation_key == "variable"
                ):
                    for pair_dict in annotation_value:
                        if "name" in pair_dict and "value" in pair_dict:
                            col_name, col_value = pair_dict["name"], pair_dict["value"]
                            _add_annotation_value(
                                row_data, col_name, col_value, sample_accession_code
                            )
                # Skip "source" field ArrayExpress sample's annotation
                elif (
                    sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                    and annotation_key == "source"
                ):
                    continue
                # "characteristics_ch1" in GEO annotation
                elif (
                    sample_metadata.get("refinebio_source_database", "") == "GEO"
                    and annotation_key == "characteristics_ch1"
                ):  # array of strings
                    for pair_str in annotation_value:
                        if ":" in pair_str:
                            # Each entry looks like "name: value"; split on the
                            # first colon only so values may contain colons.
                            col_name, col_value = pair_str.split(":", 1)
                            col_value = col_value.strip()
                            _add_annotation_value(
                                row_data, col_name, col_value, sample_accession_code
                            )
                # If annotation_value includes only a 'name' key, extract its value directly:
                elif (
                    isinstance(annotation_value, dict)
                    and len(annotation_value) == 1
                    and "name" in annotation_value
                ):
                    _add_annotation_value(
                        row_data, annotation_key, annotation_value["name"], sample_accession_code
                    )
                # If annotation_value is a single-element array, extract the element directly:
                elif isinstance(annotation_value, list) and len(annotation_value) == 1:
                    _add_annotation_value(
                        row_data, annotation_key, annotation_value[0], sample_accession_code
                    )
                # Otherwise save all annotation fields in separate columns
                else:
                    _add_annotation_value(
                        row_data, annotation_key, annotation_value, sample_accession_code
                    )

    row_data["experiment_accession"] = get_experiment_accession(sample_accession_code, dataset_data)
    return row_data
def get_tsv_columns(samples_metadata):
    """Returns an array of strings that will be written as a TSV file's
    header. The columns are based on fields found in samples_metadata.

    Some nested annotation fields are taken out as separate columns
    because they are more important than the others.

    `samples_metadata` maps sample accession codes to metadata dicts; the
    unnesting rules here mirror the value-extraction rules in
    `get_tsv_row_data` so the header matches the rows.
    """
    refinebio_columns = set()
    annotation_columns = set()
    for sample_metadata in samples_metadata.values():
        for meta_key, meta_value in sample_metadata.items():
            if meta_key != "refinebio_annotations":
                refinebio_columns.add(meta_key)
                continue

            # Decompose sample_metadata["annotations"], which is an array of annotations!
            for annotation in meta_value:
                for annotation_key, annotation_value in annotation.items():
                    # For ArrayExpress samples, take out the fields
                    # nested in "characteristic" as separate columns.
                    if (
                        sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                        and annotation_key == "characteristic"
                    ):
                        for pair_dict in annotation_value:
                            if "category" in pair_dict and "value" in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict["category"])
                    # For ArrayExpress samples, also take out the fields
                    # nested in "variable" as separate columns.
                    elif (
                        sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                        and annotation_key == "variable"
                    ):
                        for pair_dict in annotation_value:
                            if "name" in pair_dict and "value" in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict["name"])
                    # For ArrayExpress samples, skip "source" field
                    elif (
                        sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                        and annotation_key == "source"
                    ):
                        continue
                    # For GEO samples, take out the fields nested in
                    # "characteristics_ch1" as separate columns.
                    elif (
                        sample_metadata.get("refinebio_source_database", "") == "GEO"
                        and annotation_key == "characteristics_ch1"
                    ):  # array of strings
                        for pair_str in annotation_value:
                            if ":" in pair_str:
                                tokens = pair_str.split(":", 1)
                                _add_annotation_column(annotation_columns, tokens[0])
                    # Saves all other annotation fields in separate columns
                    else:
                        _add_annotation_column(annotation_columns, annotation_key)

    # Return sorted columns, in which "refinebio_accession_code" and "experiment_accession" are
    # always first, followed by the other refinebio columns (in alphabetic order), and
    # annotation columns (in alphabetic order) at the end.
    refinebio_columns.discard("refinebio_accession_code")
    return (
        ["refinebio_accession_code", "experiment_accession"]
        + sorted(refinebio_columns)
        + sorted(annotation_columns)
    )
def write_tsv_json(job_context):
    """Writes tsv files on disk.

    If the dataset is aggregated by species, also write species-level
    JSON file.

    Returns the list of TSV paths that were written. The directory
    layout depends on the dataset's aggregation mode: one directory per
    experiment, one per species, or a single "ALL" directory.
    """
    # Avoid pulling this out of job_context repeatedly.
    metadata = job_context["metadata"]

    # Uniform TSV header per dataset
    columns = get_tsv_columns(metadata["samples"])

    # Per-Experiment Metadata
    if job_context["dataset"].aggregate_by == "EXPERIMENT":
        tsv_paths = []
        for experiment_title, experiment_data in metadata["experiments"].items():
            experiment_dir = job_context["output_dir"] + experiment_title + "/"
            # Strip non-ASCII characters from the path; note the path is kept
            # as bytes here and handed to os.makedirs/open as a bytes path.
            experiment_dir = experiment_dir.encode("ascii", "ignore")
            os.makedirs(experiment_dir, exist_ok=True)
            tsv_path = experiment_dir.decode("utf-8") + "metadata_" + experiment_title + ".tsv"
            tsv_path = tsv_path.encode("ascii", "ignore")
            tsv_paths.append(tsv_path)
            with open(tsv_path, "w", encoding="utf-8") as tsv_file:
                dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
                dw.writeheader()
                for sample_accession_code, sample_metadata in metadata["samples"].items():
                    # Only write rows for samples that belong to this experiment.
                    if sample_accession_code in experiment_data["sample_accession_codes"]:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
        return tsv_paths
    # Per-Species Metadata
    elif job_context["dataset"].aggregate_by == "SPECIES":
        tsv_paths = []
        for species in job_context["group_by_keys"]:
            species_dir = job_context["output_dir"] + species + "/"
            os.makedirs(species_dir, exist_ok=True)
            samples_in_species = []
            tsv_path = species_dir + "metadata_" + species + ".tsv"
            tsv_paths.append(tsv_path)
            with open(tsv_path, "w", encoding="utf-8") as tsv_file:
                # See http://www.lucainvernizzi.net/blog/2015/08/03/8x-speed-up-for-python-s-csv-dictwriter/
                # about extrasaction.
                dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
                dw.writeheader()
                i = 0
                for sample_metadata in metadata["samples"].values():
                    if sample_metadata.get("refinebio_organism", "") == species:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
                        samples_in_species.append(sample_metadata)

                    # Log progress every thousand samples so large species
                    # compendia show signs of life.
                    i = i + 1
                    if i % 1000 == 0:
                        progress_template = (
                            "Done with {0} out of {1} lines of metadata " "for species {2}"
                        )
                        log_state(
                            progress_template.format(i, len(metadata["samples"]), species),
                            job_context["job"].id,
                        )

            # Writes a json file for current species:
            if len(samples_in_species):
                species_metadata = {"species": species, "samples": samples_in_species}
                json_path = species_dir + "metadata_" + species + ".json"
                with open(json_path, "w", encoding="utf-8") as json_file:
                    json.dump(species_metadata, json_file, indent=4, sort_keys=True)
        return tsv_paths
    # All Metadata
    else:
        all_dir = job_context["output_dir"] + "ALL/"
        os.makedirs(all_dir, exist_ok=True)
        tsv_path = all_dir + "metadata_ALL.tsv"
        with open(tsv_path, "w", encoding="utf-8") as tsv_file:
            dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
            dw.writeheader()
            for sample_metadata in metadata["samples"].values():
                row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                dw.writerow(row_data)
        return [tsv_path]
def download_computed_file(download_tuple: Tuple[ComputedFile, str]):
    """ this function downloads the latest computed file. Receives a tuple with
    the computed file and the path where it needs to be downloaded

    This is used to parallelize downloading quantsf files.

    Errors are logged (with the computed file's pk) and swallowed so one
    bad file doesn't abort the whole sync. """
    (latest_computed_file, output_file_path) = download_tuple
    try:
        latest_computed_file.get_synced_file_path(path=output_file_path)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # can still propagate; anything else is logged and ignored.
        # Let's not fail if there's an error syncing one of the quant.sf files
        logger.exception("Failed to sync computed file", computed_file_id=latest_computed_file.pk)
def sync_quant_files(output_path, samples: List[Sample]):
    """ Takes a list of Samples and copies their most recent quant.sf files
    to the provided directory (as "<accession>_quant.sf").

    Returns the total number of samples that were included """
    num_samples = 0

    page_size = 100
    # split the samples in groups and download each one individually
    with ThreadPoolExecutor(max_workers=MULTIPROCESSING_MAX_THREAD_COUNT) as executor:
        # for each sample we need it's latest quant.sf file we don't want to query the db
        # for all of them, so we do it in groups of 100, and then download all of the computed_files
        # in parallel
        # BUGFIX: the slice previously started at `i * page_size` while the
        # loop variable already advanced in steps of page_size, so every page
        # after the first was empty and those samples were silently skipped.
        for page_start in range(0, len(samples), page_size):
            sample_page = samples[page_start : page_start + page_size]
            sample_and_computed_files = []
            for sample in sample_page:
                latest_computed_file = sample.get_most_recent_quant_sf_file()
                if not latest_computed_file:
                    # No quant.sf file for this sample; skip it.
                    continue
                output_file_path = output_path + sample.accession_code + "_quant.sf"
                sample_and_computed_files.append((latest_computed_file, output_file_path))

            # download this set of files, this will take a few seconds that should also help the db recover
            executor.map(download_computed_file, sample_and_computed_files)
            num_samples += len(sample_and_computed_files)

    return num_samples
<|code_end|>
workers/data_refinery_workers/processors/utils.py
<|code_start|>import os
import random
import shutil
import signal
import string
import subprocess
import sys
import yaml
import pickle
from django.conf import settings
from django.utils import timezone
from typing import List, Dict, Callable
from data_refinery_common.job_lookup import ProcessorEnum, ProcessorPipeline, SMASHER_JOB_TYPES
from data_refinery_common.job_management import create_downloader_job
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
Dataset,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
OriginalFile,
OriginalFileSampleAssociation,
Pipeline,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
Sample,
)
from data_refinery_common.utils import (
get_env_variable,
get_env_variable_gracefully,
get_instance_id,
)
logger = get_and_configure_logger(__name__)
# Let this fail if SYSTEM_VERSION is unset.
SYSTEM_VERSION = get_env_variable("SYSTEM_VERSION")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
S3_QN_TARGET_BUCKET_NAME = get_env_variable("S3_QN_TARGET_BUCKET_NAME", "data-refinery")
DIRNAME = os.path.dirname(os.path.abspath(__file__))
CURRENT_JOB = None
def signal_handler(sig, frame):
    """Signal Handler, works for both SIGTERM and SIGINT"""
    global CURRENT_JOB
    if not CURRENT_JOB:
        # Nothing to record; just stop the worker.
        sys.exit(0)

    # Mark the in-flight job as failed without consuming a retry.
    CURRENT_JOB.success = False
    CURRENT_JOB.end_time = timezone.now()
    CURRENT_JOB.num_retries = CURRENT_JOB.num_retries - 1
    CURRENT_JOB.failure_reason = "Interruped by SIGTERM/SIGINT: " + str(sig)
    CURRENT_JOB.save()
    sys.exit(0)
def prepare_original_files(job_context):
    """ Provision in the Job context for OriginalFile-driven processors

    Verifies every OriginalFile attached to the job is actually on disk.
    If any are missing, recreates downloader jobs for them and raises a
    non-retryable, aborting ProcessorJobError. Otherwise populates
    "original_files", "samples" and "computed_files" in job_context.
    """
    job = job_context["job"]
    original_files = job.original_files.all()

    if original_files.count() == 0:
        raise ProcessorJobError("No files were found for the job.", success=False)

    undownloaded_files = set()
    for original_file in original_files:
        if original_file.needs_downloading(job_context["job_id"]):
            if original_file.is_downloaded:
                # If it needs to be downloaded then it's not
                # downloaded and the is_downloaded field should stop
                # lying about that.
                original_file.is_downloaded = False
                original_file.save()

            undownloaded_files.add(original_file)

    if undownloaded_files:
        logger.info(
            (
                "One or more files found which were missing or not downloaded."
                " Creating downloader jobs for them and deleting this job."
            ),
            processor_job=job.id,
            missing_files=list(undownloaded_files),
        )

        was_job_created = create_downloader_job(
            undownloaded_files, processor_job_id=job_context["job_id"], force=True
        )
        if not was_job_created:
            raise ProcessorJobError(
                "Missing file for processor job but unable to recreate downloader jobs!",
                success=False,
            )

        # The downloader job was created, so this processor job must step
        # aside permanently rather than retry.
        raise ProcessorJobError(
            "We can not process the data because it is not on the disk",
            success=False,
            no_retry=True,  # this job should not be retried again
            abort=True,  # abort the job and don't do anything else
            undownloaded_files=[file.id for file in undownloaded_files],
        )

    job_context["original_files"] = original_files
    # NOTE(review): samples are looked up via the FIRST original file only --
    # assumes all of the job's files share the same samples; confirm.
    first_original_file = original_files.first()
    samples = Sample.objects.filter(original_files=first_original_file)
    job_context["samples"] = samples
    job_context["computed_files"] = []

    return job_context
def prepare_dataset(job_context):
    """ Provision in the Job context for Dataset-driven processors
    """
    job = job_context["job"]
    job_datasets = job.datasets.all()

    # This should never be more than one!
    if job_datasets.count() > 1:
        raise ProcessorJobError(
            "More than one dataset for processor job!", success=False, no_retry=True
        )
    if job_datasets.count() == 0:
        raise ProcessorJobError(
            "No datasets found for processor job!", success=False, no_retry=True
        )

    # Flag the dataset as in-flight before doing any work.
    dataset = job_datasets.first()
    dataset.is_processing = True
    dataset.save()

    # Get the samples to smash
    job_context.update(
        dataset=dataset,
        samples=dataset.get_aggregated_samples(),
        experiments=dataset.get_experiments(),
        # Just in case
        original_files=[],
        computed_files=[],
    )
    return job_context
def start_job(job_context: Dict):
    """A processor function to start jobs.

    Record in the database that this job is being started and
    retrieves the job's batches from the database and adds them to the
    dictionary passed in with the key 'batches'.
    """
    job = job_context["job"]

    # If the sample already has a good computed file there is no reason to
    # process it again (tximport is exempt: it operates on an experiment,
    # not on a single original file).
    original_file = job.original_files.first()
    if (
        not job.pipeline_applied == ProcessorPipeline.TXIMPORT.value
        and original_file
        and not original_file.needs_processing(job_context["job_id"])
    ):
        failure_reason = (
            "Sample has a good computed file, it must have been processed, "
            "so it doesn't need to be downloaded! Aborting!"
        )
        logger.error(failure_reason, job_id=job.id, original_file=original_file)
        job_context["original_files"] = []
        job_context["computed_files"] = []
        job_context["abort"] = True
        # Will be saved by end_job.
        job_context["job"].failure_reason = failure_reason
        return job_context

    # Set up the SIGTERM handler so we can appropriately handle being interrupted.
    # (`docker stop` uses SIGTERM, not SIGINT.)
    # (however, Nomad sends an SIGINT so catch both.)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # This job should not have been started, for some reason Nomad restarts some of our jobs
    # https://github.com/AlexsLemonade/refinebio/issues/1487
    if job.start_time is not None and settings.RUNNING_IN_CLOUD:
        # Let's just log the event and let the job run instead of failing
        # and also reset the endtime and failure reason, since those fields might have been set
        logger.warn(
            "ProcessorJob was restarted by Nomad. We do not know why this happened",
            processor_job=job.id,
            success=job.success,
            failure_reason=job.failure_reason,
            start_time=job.start_time,
            end_time=job.end_time,
        )
        job.end_time = None
        job.failure_reason = None

    # Some jobs (e.g. compendia processor jobs) get dispatched without their
    # nomad_job_id ever being recorded, which makes them impossible to look
    # up by Nomad job ID later. Backfill it from the environment Nomad
    # provides to the running allocation.
    # NOTE(review): assumes the Nomad runtime exposes NOMAD_JOB_ID to the
    # task -- confirm against the deployed Nomad version.
    if not job.nomad_job_id:
        job.nomad_job_id = get_env_variable_gracefully("NOMAD_JOB_ID", None)

    job.worker_id = get_instance_id()
    job.worker_version = SYSTEM_VERSION
    job.start_time = timezone.now()
    job.save()

    # Remember the running job so signal_handler can mark it failed on
    # SIGTERM/SIGINT.
    global CURRENT_JOB
    CURRENT_JOB = job

    logger.debug("Starting processor Job.", processor_job=job.id, pipeline=job.pipeline_applied)

    # Janitor jobs don't operate on file objects.
    # Tximport jobs don't need to download the original file, they
    # just need it to know what experiment to process.
    if job.pipeline_applied not in [
        ProcessorPipeline.JANITOR.value,
        ProcessorPipeline.TXIMPORT.value,
    ]:
        # Some jobs take OriginalFiles, other take Datasets
        if ProcessorPipeline[job.pipeline_applied] not in SMASHER_JOB_TYPES:
            job_context = prepare_original_files(job_context)
            if not job_context.get("success", True):
                return job_context
        else:
            job_context = prepare_dataset(job_context)
            if not job_context.get("success", True):
                return job_context
    else:
        # Just in case
        job_context["original_files"] = []
        job_context["computed_files"] = []

    return job_context
def end_job(job_context: Dict, abort=False):
    """A processor function to end jobs.

    Record in the database that this job has completed and that
    the samples have been processed if not aborted.

    Order matters here: computed files are uploaded first so an upload
    failure can flip the job to failed before samples are marked
    processed; cleanup and the final job.save() come last.
    """
    job = job_context["job"]

    if "success" in job_context:
        success = job_context["success"]
    else:
        success = True

    # Upload first so if this fails we can set success = False and let
    # the rest of the function mark it as failed.
    if success:
        # QN reference files go to a special bucket so they can be
        # publicly available.
        if job_context["job"].pipeline_applied == "QN_REFERENCE":
            s3_bucket = S3_QN_TARGET_BUCKET_NAME
        else:
            s3_bucket = S3_BUCKET_NAME

        # S3-sync Computed Files
        for computed_file in job_context.get("computed_files", []):
            # Ensure even distribution across S3 servers
            nonce = "".join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(24)
            )
            result = computed_file.sync_to_s3(s3_bucket, nonce + "_" + computed_file.filename)
            if result and settings.RUNNING_IN_CLOUD:
                computed_file.delete_local_file()
            elif not result:
                # One failed upload fails the whole job.
                success = False
                job_context["success"] = False
                job.failure_reason = "Failed to upload computed file."
                break

    if not success:
        # Remove any computed files (local and DB) so a retry starts clean.
        for computed_file in job_context.get("computed_files", []):
            computed_file.delete_local_file()
            if computed_file.id:
                computed_file.delete()

    if not abort:
        if job_context.get("success", False) and not (
            job_context["job"].pipeline_applied
            in [
                ProcessorPipeline.SMASHER.value,
                ProcessorPipeline.QN_REFERENCE.value,
                ProcessorPipeline.CREATE_COMPENDIA.value,
                ProcessorPipeline.CREATE_QUANTPENDIA.value,
                ProcessorPipeline.JANITOR.value,
            ]
        ):
            # Salmon requires the final `tximport` step to be fully `is_processed`.
            mark_as_processed = True
            if job_context["job"].pipeline_applied == "SALMON" and not job_context.get(
                "tximported", False
            ):
                mark_as_processed = False

            if mark_as_processed:
                # This handles most of our cases
                unique_experiments = []
                for sample in job_context.get("samples", []):
                    sample.is_processed = True
                    sample.save()
                    if sample.experiments.all().count() > 0:
                        unique_experiments = list(
                            set(unique_experiments + sample.experiments.all()[::1])
                        )

                # Explicitly for the single-salmon scenario
                if "sample" in job_context:
                    sample = job_context["sample"]
                    sample.is_processed = True
                    sample.save()

                # Refresh sample counts on every touched experiment.
                for experiment in unique_experiments:
                    experiment.update_num_samples()

    # If we are aborting, it's because we want to do something
    # different, so leave the original files so that "something
    # different" can use them.
    if (success or job.no_retry) and not abort:
        # Cleanup Original Files
        if "original_files" in job_context:
            for original_file in job_context["original_files"]:
                original_file.delete_local_file()

    # If the pipeline includes any steps, save it.
    if "pipeline" in job_context:
        pipeline = job_context["pipeline"]
        if len(pipeline.steps):
            pipeline.save()

    # Compendia jobs keep their work_dir (their output is the work).
    if (
        "work_dir" in job_context
        and job_context["job"].pipeline_applied != ProcessorPipeline.CREATE_COMPENDIA.value
        and settings.RUNNING_IN_CLOUD
    ):
        shutil.rmtree(job_context["work_dir"], ignore_errors=True)

    job.abort = abort
    job.success = success
    job.end_time = timezone.now()
    job.save()

    if success:
        logger.debug(
            "Processor job completed successfully.",
            processor_job=job.id,
            pipeline_applied=job.pipeline_applied,
        )
    else:
        if not job.failure_reason:
            logger.error(
                "Processor job failed without having failure_reason set. FIX ME!!!!!!!!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
            )
        else:
            logger.error(
                "Processor job failed!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
                failure_reason=job.failure_reason,
            )

    # Return Final Job context so testers can check it
    return job_context
def run_pipeline(start_value: Dict, pipeline: List[Callable]):
    """Runs a pipeline of processor functions.

    start_value must contain a key 'job_id' which is a valid id for a
    ProcessorJob record.

    Each processor fuction must accept a dictionary and return a
    dictionary.

    Any processor function which returns a dictionary containing a key
    of 'success' with a value of False will cause the pipeline to
    terminate with a call to utils.end_job.

    The key 'job' is reserved for the ProcessorJob currently being
    run.  It is required that the dictionary returned by each
    processor function preserve the mapping for 'job' that was passed
    into it.
    """
    job_id = start_value["job_id"]
    try:
        job = ProcessorJob.objects.get(id=job_id)
    except ProcessorJob.DoesNotExist:
        logger.error("Cannot find processor job record.", processor_job=job_id)
        return

    if len(pipeline) == 0:
        logger.error("Empty pipeline specified.", procesor_job=job_id)

    last_result = start_value
    last_result["job"] = job
    for processor in pipeline:
        try:
            last_result = processor(last_result)
        except ProcessorJobError as e:
            # A deliberate, structured failure: the error already knows how
            # to update the job record (success/no_retry/retried/abort).
            e.update_job(job)
            logger.exception(e.failure_reason, processor_job=job.id, **e.context)
            if e.success is False:
                # end_job will use this and set the value
                last_result["success"] = False
            return end_job(last_result, abort=bool(e.abort))
        except Exception as e:
            # Anything else is an unexpected crash inside a processor
            # function; record which function blew up.
            failure_reason = (
                "Unhandled exception caught while running processor" " function {} in pipeline: "
            ).format(processor.__name__)
            logger.exception(failure_reason, no_retry=job.no_retry, processor_job=job_id)
            last_result["success"] = False
            last_result["job"].failure_reason = failure_reason + str(e)
            return end_job(last_result)

        if "success" in last_result and last_result["success"] is False:
            logger.error(
                "Processor function %s failed. Terminating pipeline.",
                processor.__name__,
                processor_job=job_id,
                failure_reason=last_result["job"].failure_reason,
            )
            return end_job(last_result)

        if last_result.get("abort", False):
            return end_job(last_result, abort=True)

    return last_result
class ProcessorJobError(Exception):
    """Raised by processor pipeline steps to signal a job failure.

    Carries the failure reason, optional job-state flags, and any extra
    keyword arguments as logging context.
    """

    def __init__(
        self, failure_reason, *, success=None, no_retry=None, retried=None, abort=None, **context
    ):
        super(ProcessorJobError, self).__init__(failure_reason)
        self.failure_reason = failure_reason
        self.success = success
        self.no_retry = no_retry
        self.retried = retried
        self.abort = abort
        # Extra keyword arguments are preserved so callers can log them.
        self.context = context

    def update_job(self, job):
        """Copy this error's state onto the given ProcessorJob and save it."""
        job.failure_reason = self.failure_reason
        # Only flags that were explicitly provided overwrite the job's state.
        for flag_name in ("success", "no_retry", "retried", "abort"):
            flag_value = getattr(self, flag_name)
            if flag_value is not None:
                setattr(job, flag_name, flag_value)
        job.save()

        # Also update the failure reason if this is a dataset's processor job.
        for dataset in job.datasets.all():
            dataset.failure_reason = self.failure_reason
            dataset.success = False
            dataset.save()
def get_os_distro():
    """Return a string identifying the OS distribution.

    Since we are using Docker, this function only considers Linux
    distributions. Alternative files on Linux: /etc/os-release,
    /etc/lsb-release. "/etc/issue" doesn't exist on Mac OS X (where
    "sw_vers" would be needed); a more cross-platform solution would be
    Python's "platform" module.
    """
    with open("/etc/issue") as distro_fh:
        first_line = distro_fh.readline()
    # /etc/issue embeds getty escape sequences like \l plus trailing
    # newlines; strip those characters from both ends.
    return first_line.strip("\l\n\\n ")
def get_os_pkgs(pkg_list):
    """Return a dict mapping each OS-level package name to its version.

    Assumes the package manager is Debian-based (dpkg/apt).
    Raises an Exception if any package in pkg_list is not installed.
    """
    versions = {}
    for pkg_name in pkg_list:
        result = subprocess.run(
            ["dpkg-query", "--show", pkg_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if result.returncode:
            raise Exception(
                "OS-level package %s not found: %s" % (pkg_name, result.stderr.decode().strip())
            )

        # dpkg-query --show prints "<name>\t<version>".
        versions[pkg_name] = result.stdout.decode().strip().split("\t")[-1]
    return versions
def get_cmd_lines(cmd_list):
    """Return a dict mapping each command string to its stripped output.

    Raises an Exception if any command exits with a non-zero status.
    """
    outputs = {}
    for cmd in cmd_list:
        tokens = cmd.split()
        result = subprocess.run(tokens, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode:
            raise Exception(
                "Failed to run command line '%s': %s" % (cmd, result.stderr.decode().strip())
            )

        # Workaround for the "salmontools --version" command, whose output is
        # sent to stderr instead of stdout. We deliberately keep stdout and
        # stderr separate rather than merging them with
        # "stderr=subprocess.STDOUT" when launching the subprocess.
        raw_output = result.stderr if tokens[0] == "salmontools" else result.stdout
        outputs[cmd] = raw_output.decode().strip()
    return outputs
def get_pip_pkgs(pkg_list):
    """Return a dict mapping each pip-installed package name to its version.

    Instead of using `pip show pkg | grep Version | awk '{print $2}'` per
    package, we save the output of `pip freeze` once and then look up each
    input package, which launches the subprocess only a single time.

    Raises an Exception if `pip freeze` fails or a requested package is
    missing from its output.
    """
    process_done = subprocess.run(["pip", "freeze"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process_done.returncode:
        raise Exception("'pip freeze' failed: %s" % process_done.stderr.decode().strip())

    frozen_pkgs = dict()
    for item in process_done.stdout.decode().split():
        # `pip freeze` can also emit editable installs ("-e git+...") and
        # direct references ("pkg @ file://..."); those tokens have no "=="
        # separator and previously crashed the tuple unpacking here, so skip
        # anything that isn't a plain "name==version" entry.
        name, separator, version = item.partition("==")
        if separator:
            frozen_pkgs[name] = version

    pkg_info = dict()
    for pkg in pkg_list:
        try:
            version = frozen_pkgs[pkg]
        except KeyError:
            raise Exception("Pip package not found: %s" % pkg)
        pkg_info[pkg] = version
    return pkg_info
def get_bioc_version():
    """Return the version string of the "Bioconductor" package in R.

    The data frame returned by installed.packages() does NOT include a
    package named "Bioconductor", so a dedicated R command is required to
    look up the Bioconductor version.
    """
    r_command = "tools:::.BioC_version_associated_with_R_version()"
    result = subprocess.run(
        ["Rscript", "-e", r_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if result.returncode:
        raise Exception(
            "R command failed to retrieve Bioconductor version: %s"
            % result.stderr.decode().strip()
        )

    # Output looks like `[1] '3.8'`: take the final token and drop the
    # leading and trailing quote characters.
    version = result.stdout.decode().strip().split()[-1][1:-1]
    if not version:
        raise Exception("Bioconductor not found")
    return version
def get_r_pkgs(pkg_list):
    """Return a dict mapping each R package name to its version.

    Raises an Exception if the R lookup fails or a requested package is
    not installed.
    """
    # Use "Rscript -e <R_commands>" to print every user-installed R package
    # as a "name version" row.
    r_commands = "packages.df <- as.data.frame(installed.packages()[, c(1, 3:4)]); \
    packages.df <- packages.df[is.na(packages.df$Priority), 1:2, drop=FALSE]; \
    colnames(packages.df) <- NULL; \
    print(packages.df, row.names=FALSE);"

    result = subprocess.run(
        ["Rscript", "-e", r_commands], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if result.returncode:
        raise Exception(
            "R command failed to retrieves installed packages: %s"
            % result.stderr.decode().strip()
        )

    r_pkgs = dict(
        row.strip().split() for row in result.stdout.decode().strip().split("\n")
    )

    # "Brainarray" is a collection that consists of 121 ".*ensgprobe"
    # packages sharing one version number; report it through the
    # 'hgu133plus2hsensgprobe' package as a proxy.
    ba_proxy_pkg = "hgu133plus2hsensgprobe"

    pkg_info = dict()
    for pkg in pkg_list:
        if pkg == "Bioconductor":
            pkg_info[pkg] = get_bioc_version()
            continue
        lookup_name = ba_proxy_pkg if pkg == "Brainarray" else pkg
        try:
            pkg_info[pkg] = r_pkgs[lookup_name]
        except KeyError:
            raise Exception("R package not found: %s" % pkg)
    return pkg_info
def get_checksums(filenames_list):
    """Return a dict mapping each file's name to its md5 checksum.

    Paths are resolved relative to the module-level DIRNAME.
    """
    checksums = dict()
    for filename in filenames_list:
        abs_filepath = os.path.join(DIRNAME, filename)
        result = subprocess.run(
            ["md5sum", abs_filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if result.returncode:
            raise Exception("md5sum command error:", result.stderr.decode().strip())

        # md5sum prints "<checksum>  <path>"; keep just the checksum token.
        checksums[filename] = result.stdout.decode().strip().split()[0]
    return checksums
def get_runtime_env(yml_filename):
    """Read the input YAML file and return a dict in which each key is a
    category name of runtime environment and the corresponding value is an
    object with version information of the packages listed in that category.

    Raises an Exception if the YAML file contains an unknown category.
    """
    runtime_env = dict()
    with open(yml_filename) as yml_fh:
        # safe_load avoids arbitrary Python object construction and the
        # deprecation warning yaml.load() emits when called without an
        # explicit Loader. The package-list YAML only needs plain types.
        pkgs = yaml.safe_load(yml_fh)

    # Dispatch table from category name to the collector that knows how to
    # report versions for that category.
    collectors = {
        "os_pkg": get_os_pkgs,
        "cmd_line": get_cmd_lines,
        "python": get_pip_pkgs,
        "R": get_r_pkgs,
        "checksum": get_checksums,
    }
    for pkg_type, pkg_list in pkgs.items():
        if pkg_type == "os_distribution":
            # Takes no package list; reads /etc/issue directly.
            value = get_os_distro()
        elif pkg_type in collectors:
            value = collectors[pkg_type](pkg_list)
        else:
            raise Exception("Unknown category in %s: %s" % (yml_filename, pkg_type))
        runtime_env[pkg_type] = value
    return runtime_env
def find_processor(enum_key):
    """Return either a newly created Processor record, or the one in the
    database that matches the current processor name, version and environment.
    """
    processor_info = ProcessorEnum[enum_key].value

    # In the current implementation, ALWAYS collect the runtime environment.
    yml_path = os.path.join(DIRNAME, processor_info["yml_file"])
    environment = get_runtime_env(yml_path)

    obj, status = Processor.objects.get_or_create(
        name=processor_info["name"],
        version=SYSTEM_VERSION,
        docker_image=processor_info["docker_img"],
        environment=environment,
    )
    return obj
def handle_processor_exception(job_context, processor_key, ex):
    """Record a failure to set a processor on the job and mark the
    job_context as unsuccessful. Returns the mutated job_context."""
    failure_message = "Failed to set processor: %s" % ex
    logger.error(failure_message, job_id=job_context["job"].id, processor=processor_key)
    job = job_context["job"]
    job.failure_reason = failure_message
    job_context["success"] = False
    return job_context
def cache_keys(*keys, work_dir_key="work_dir"):
    """Decorator to be applied to a pipeline function.

    Returns a new function that calls the original one and caches the given
    keys into the `work_dir`. On the next call it will load those keys (if
    they exist) and add them to the job_context instead of executing the
    function.
    """

    def inner(func):
        # Generate a unique name for the cache based on the pipeline name
        # and the cached keys.
        cache_name = "__".join(list(keys) + [func.__name__])

        def pipeline(job_context):
            cache_path = os.path.join(job_context[work_dir_key], cache_name)
            if os.path.exists(cache_path):
                # Cached values exist; load the keys from the cache.
                try:
                    # Context manager ensures the file handle is closed even
                    # when unpickling fails (the previous bare open() leaked
                    # the handle on that path).
                    with open(cache_path, "rb") as cache_file:
                        values = pickle.load(cache_file)
                    return {**job_context, **values}
                except Exception:
                    # Don't fail if we can't load the cache — fall through and
                    # recompute. Catch Exception (not bare `except:`) so
                    # KeyboardInterrupt/SystemExit still propagate.
                    logger.warning(
                        "Failed to load cached data for pipeline function.",
                        function_name=func.__name__,
                        keys=keys,
                    )

            # Execute the actual function.
            job_context = func(job_context)

            try:
                # Save cached data for the next run.
                values = {key: job_context[key] for key in keys}
                with open(cache_path, "wb") as cache_file:
                    pickle.dump(values, cache_file)
            except Exception:
                # Don't fail if we can't save the cache.
                logger.warning(
                    "Failed to cache data for pipeline function.",
                    function_name=func.__name__,
                    keys=keys,
                )

            return job_context

        return pipeline

    return inner
<|code_end|>
| workers/data_refinery_workers/processors/smasher.py
<|code_start|># -*- coding: utf-8 -*-
import boto3
import csv
import os
import rpy2
import rpy2.robjects as ro
import shutil
import simplejson as json
import string
import warnings
import requests
import psutil
import logging
import time
from botocore.exceptions import ClientError
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from pathlib import Path
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
from sklearn import preprocessing
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
OriginalFile,
Pipeline,
SampleResultAssociation,
Dataset,
)
from data_refinery_common.utils import get_env_variable, calculate_file_size, calculate_sha1
from data_refinery_workers.processors import utils, smashing_utils
from urllib.parse import quote
# S3 buckets used for smasher output and general data storage.
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")

# Pre-rendered notification email templates, flattened onto a single line.
BODY_HTML = (
    Path("data_refinery_workers/processors/smasher_email.min.html").read_text().replace("\n", "")
)
BODY_ERROR_HTML = (
    Path("data_refinery_workers/processors/smasher_email_error.min.html")
    .read_text()
    .replace("\n", "")
)

BYTES_IN_GB = 1024 * 1024 * 1024
logger = get_and_configure_logger(__name__)
### DEBUG ###
# NOTE(review): DEBUG level is forced on here — presumably for extra smasher
# telemetry via log_state(); confirm this is intended for production.
logger.setLevel(logging.getLevelName("DEBUG"))

# Leave roughly half the cores (minus one) for other work, but at least one.
PROCESS_POOL_SIZE = max(1, int(psutil.cpu_count() / 2 - 1))

# scikit-learn scaler classes selectable via Dataset.scale_by.
SCALERS = {
    "MINMAX": preprocessing.MinMaxScaler,
    "STANDARD": preprocessing.StandardScaler,
    "ROBUST": preprocessing.RobustScaler,
}
def log_state(message, job_id, start_time=False):
    """Log a debug checkpoint with CPU/RAM usage for the given job.

    When called without `start_time`, returns a timestamp suitable for a
    later paired call; when `start_time` is given, also logs the elapsed
    duration. No-op (returns None) unless DEBUG logging is enabled.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return None

    rss_gb = psutil.Process(os.getpid()).memory_info().rss / BYTES_IN_GB
    logger.debug(message, total_cpu=psutil.cpu_percent(), process_ram=rss_gb, job_id=job_id)

    if start_time:
        logger.debug("Duration: %s" % (time.time() - start_time), job_id=job_id)
        return None
    return time.time()
def _inner_join(job_context: Dict) -> pd.DataFrame:
    """Performs an inner join across the all_frames key of job_context.

    Returns a dataframe, not the job_context.

    TODO: This function should be mostly unnecessary now because we
    pretty much do this in the smashing utils but I don't want to rip
    it out right now .
    """
    # Merge all of the frames we've gathered into a single big frame, skipping duplicates.
    # TODO: If the very first frame is the wrong platform, are we boned?
    merged = job_context["all_frames"][0]
    i = 1

    # Track the previous row count and a known-good copy of the merge so a
    # frame that wipes out all rows can be rolled back below.
    old_len_merged = len(merged)
    merged_backup = merged

    while i < len(job_context["all_frames"]):
        frame = job_context["all_frames"][i]
        i = i + 1

        if i % 1000 == 0:
            logger.info("Smashing keyframe", i=i, job_id=job_context["job"].id)

        # I'm not sure where these are sneaking in from, but we don't want them.
        # Related: https://github.com/AlexsLemonade/refinebio/issues/390
        breaker = False
        for column in frame.columns:
            if column in merged.columns:
                breaker = True

        if breaker:
            # NOTE(review): `column` here is whatever the loop above ended on,
            # which is not necessarily the duplicated column — confirm before
            # relying on this log field.
            logger.warning(
                "Column repeated for smash job!",
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
                column=column,
            )
            continue

        # This is the inner join, the main "Smash" operation
        merged = merged.merge(frame, how="inner", left_index=True, right_index=True)

        new_len_merged = len(merged)
        if new_len_merged < old_len_merged:
            # An inner join only keeps genes present in both sides, so a
            # shrinking row count means this frame lacked some genes.
            logger.warning(
                "Dropped rows while smashing!",
                dataset_id=job_context["dataset"].id,
                old_len_merged=old_len_merged,
                new_len_merged=new_len_merged,
            )
        if new_len_merged == 0:
            # The frame shared no genes at all with the merge so far; roll
            # back to the last good merge and record the frame as unsmashable.
            logger.warning(
                "Skipping a bad merge frame!",
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
                old_len_merged=old_len_merged,
                new_len_merged=new_len_merged,
                bad_frame_number=i,
            )
            merged = merged_backup
            new_len_merged = len(merged)
            try:
                job_context["unsmashable_files"].append(frame.columns[0])
            except Exception:
                # Something is really, really wrong with this frame.
                pass

        old_len_merged = len(merged)
        merged_backup = merged

    return merged
def process_frames_for_key(key: str, input_files: List[ComputedFile], job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in
    `input_files`.

    Adds to job_context the key 'all_frames', a list of pandas dataframes
    containing all the samples' data, and appends the filenames of any
    files determined to be unsmashable to 'unsmashable_files'.
    """
    job_context["original_merged"] = pd.DataFrame()

    timer = log_state("Building list of all_frames key {}".format(key), job_context["job"].id)

    job_context["all_frames"] = []
    for computed_file, sample in input_files:
        frame_data = smashing_utils.process_frame(
            job_context["work_dir"],
            computed_file,
            sample.accession_code,
            job_context["dataset"].aggregate_by,
        )
        if frame_data is None:
            # process_frame returns None for files it can't turn into a
            # usable dataframe; record them so the user can be told.
            logger.warning(
                "Unable to smash file",
                computed_file=computed_file.id,
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
            )
            job_context["unsmashable_files"].append(computed_file.filename)
        else:
            job_context["all_frames"].append(frame_data)

    log_state(
        "Finished building list of all_frames key {}".format(key),
        job_context["job"].id,
        timer,
    )
    return job_context
def _smash_key(job_context: Dict, key: str, input_files: List[ComputedFile]) -> Dict:
    """Smash all of the input files together for a given key.

    Steps:
        Combine common genes (pandas merge)
        Transpose such that genes are columns (features)
        Scale features with sci-kit learn
        Transpose again such that samples are columns and genes are rows
    """
    start_smash = log_state("start _smash_key for {}".format(key), job_context["job"].id)
    # Check if we need to copy the quant.sf files
    if job_context["dataset"].quant_sf_only:
        outfile_dir = job_context["output_dir"] + key + "/"
        os.makedirs(outfile_dir, exist_ok=True)
        samples = [sample for (_, sample) in input_files]
        job_context["num_samples"] += smashing_utils.sync_quant_files(outfile_dir, samples)
        # we ONLY want to give quant sf files to the user if that's what they requested
        return job_context

    job_context = process_frames_for_key(key, input_files, job_context)

    if len(job_context["all_frames"]) < 1:
        logger.error(
            "Was told to smash a key with no frames!", job_id=job_context["job"].id, key=key
        )
        # TODO: is this the proper way to handle this? I can see us
        # not wanting to fail an entire dataset because one experiment
        # had a problem, but I also think it could be problematic to
        # just skip an experiment and pretend nothing went wrong.
        return job_context

    merged = _inner_join(job_context)
    job_context["original_merged"] = merged
    log_state("end build all frames", job_context["job"].id, start_smash)
    start_qn = log_state("start qn", job_context["job"].id, start_smash)

    # Quantile Normalization
    if job_context["dataset"].quantile_normalize:
        job_context["merged_no_qn"] = merged
        job_context["organism"] = job_context["dataset"].get_samples().first().organism
        job_context = smashing_utils.quantile_normalize(job_context)
        # NOTE(review): if quantile_normalize didn't set 'merged_qn', merged
        # becomes None and the .transpose() below will raise — confirm the
        # helper always sets it on success.
        merged = job_context.get("merged_qn", None)

    # End QN
    log_state("end qn", job_context["job"].id, start_qn)
    # Transpose before scaling
    transposed = merged.transpose()
    start_scaler = log_state("starting scaler", job_context["job"].id)
    # Scaler
    if job_context["dataset"].scale_by != "NONE":
        # (sic: "scale_funtion" — existing local name kept as-is.)
        scale_funtion = SCALERS[job_context["dataset"].scale_by]
        scaler = scale_funtion(copy=True)
        scaler.fit(transposed)
        scaled = pd.DataFrame(
            scaler.transform(transposed), index=transposed.index, columns=transposed.columns
        )
        # Untranspose
        untransposed = scaled.transpose()
    else:
        # Wheeeeeeeeeee
        untransposed = transposed.transpose()
    log_state("end scaler", job_context["job"].id, start_scaler)

    # This is just for quality assurance in tests.
    job_context["final_frame"] = untransposed

    # Normalize the Header format
    untransposed.index.rename("Gene", inplace=True)

    outfile_dir = job_context["output_dir"] + key + "/"
    os.makedirs(outfile_dir, exist_ok=True)
    outfile = outfile_dir + key + ".tsv"
    job_context["smash_outfile"] = outfile
    untransposed.to_csv(outfile, sep="\t", encoding="utf-8")

    log_state("end _smash_key for {}".format(key), job_context["job"].id, start_smash)
    return job_context
def _smash_all(job_context: Dict) -> Dict:
    """Perform smashing on all species/experiments in the dataset.

    On success sets job_context['output_file'] to the final zip archive and
    marks the dataset successful; raises ProcessorJobError on any failure.
    """
    start_smash = log_state("start smash", job_context["job"].id)
    job_context["unsmashable_files"] = []
    job_context["num_samples"] = 0

    # Smash all of the sample sets
    logger.debug(
        "About to smash!",
        dataset_count=len(job_context["dataset"].data),
        job_id=job_context["job"].id,
    )

    try:
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop("input_files").items():
            job_context = _smash_key(job_context, key, input_files)
    except Exception as e:
        raise utils.ProcessorJobError(
            "Could not smash dataset: " + str(e),
            success=False,
            dataset_id=job_context["dataset"].id,
            num_input_files=job_context["num_input_files"],
        )

    smashing_utils.write_non_data_files(job_context)

    # Finally, compress all files into a zip
    final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
    try:
        shutil.make_archive(final_zip_base, "zip", job_context["output_dir"])
    except Exception:
        # Was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit; catch Exception only.
        raise utils.ProcessorJobError("Smash Error while generating zip file", success=False)
    job_context["output_file"] = final_zip_base + ".zip"

    job_context["dataset"].success = True
    job_context["dataset"].save()

    logger.debug("Created smash output!", archive_location=job_context["output_file"])

    log_state("end smash", job_context["job"].id, start_smash)
    return job_context
def _upload(job_context: Dict) -> Dict:
    """Uploads the result file to S3 and notifies user.

    No-op unless uploading was requested and we are running in the cloud.
    Sets job_context['result_url'] and removes the local archive on success.
    """
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context

    s3_client = boto3.client("s3")
    output_filename = job_context["output_file"].split("/")[-1]

    try:
        # Note that file expiry is handled by the S3 object lifecycle,
        # managed by terraform.
        s3_client.upload_file(
            job_context["output_file"],
            RESULTS_BUCKET,
            output_filename,
            ExtraArgs={"ACL": "public-read"},
        )
    except Exception as e:
        # Chain the boto3 error so the real cause shows up in the traceback
        # (the original discarded `e` entirely).
        raise utils.ProcessorJobError(
            "Failed to upload smash result file.", success=False, file=job_context["output_file"]
        ) from e

    result_url = "https://s3.amazonaws.com/" + RESULTS_BUCKET + "/" + output_filename
    job_context["result_url"] = result_url

    logger.debug("Result uploaded!", result_url=job_context["result_url"])

    # File is uploaded, we can delete the local.
    try:
        os.remove(job_context["output_file"])
    except OSError:
        pass

    return job_context
def _notify(job_context: Dict) -> Dict:
    """Use AWS SES to notify a user of a smash result.

    Also posts a Slack message when the job failed, and marks the job
    no-retry once notifications have gone out so users aren't notified twice.
    No-op unless uploading was requested and we are running in the cloud.
    """
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context

    # Send a notification to slack when a dataset fails to be processed
    if job_context["job"].success is False:
        try:
            _notify_slack_failed_dataset(job_context)
        except Exception as e:
            # `warn` is a deprecated alias; use `warning`.
            logger.warning(e)  # It doesn't really matter if this didn't work

    # Don't send an email if we don't have address.
    if job_context["dataset"].email_address:
        # Try to send the email.
        try:
            _notify_send_email(job_context)
        # Display an error if something goes wrong.
        except ClientError as e:
            raise utils.ProcessorJobError(
                "ClientError while notifying",
                success=False,
                exc_info=1,
                client_error_message=e.response["Error"]["Message"],
            )
        except Exception as e:
            raise utils.ProcessorJobError(
                "General failure when trying to send email.",
                success=False,
                exc_info=1,
                # .get() so a missing result_url (e.g. upload skipped) can't
                # mask the original email failure with a KeyError.
                result_url=job_context.get("result_url"),
            )

    # We don't want to retry this dataset after we send a notification to users
    # https://github.com/alexslemonade/refinebio/issues/1944
    job_context["job"].no_retry = True
    job_context["job"].save()

    return job_context
def _notify_slack_failed_dataset(job_context: Dict):
    """Send a slack notification when a dataset fails to smash."""
    # Link to the dataset page, where the user can re-try the download job.
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)

    failure_attachment = {
        "fallback": "Dataset failed processing.",
        "title": "Dataset failed processing",
        "title_link": dataset_url,
        "color": "#db3b28",
        "text": job_context["job"].failure_reason,
        "fields": [
            {
                "title": "Dataset id",
                "value": str(job_context["dataset"].id),
                "short": True,
            },
            {
                "title": "Email",
                "value": job_context["dataset"].email_address,
                "short": True,
            },
        ],
        "footer": "Refine.bio",
        "footer_icon": "https://s3.amazonaws.com/refinebio-email/logo-2x.png",
    }
    payload = {
        "channel": "ccdl-general",  # Move to robots when we get sick of these
        "username": "EngagementBot",
        "icon_emoji": ":halal:",
        "attachments": [failure_attachment],
    }

    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json=payload,
        headers={"Content-Type": "application/json"},
        timeout=10,
    )
def _notify_send_email(job_context):
    """ Send email notification to the user if the dataset succeded or failed. """
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)

    SENDER = "Refine.bio Mail Robot <noreply@refine.bio>"
    RECIPIENT = job_context["dataset"].email_address
    AWS_REGION = "us-east-1"
    CHARSET = "UTF-8"

    if job_context["job"].success is False:
        # Failure email: explain the error and give the user pre-filled
        # GitHub-issue and mailto links.
        SUBJECT = "There was a problem processing your refine.bio dataset :("
        BODY_TEXT = (
            "We tried but were unable to process your requested dataset. Error was: \n\n"
            + str(job_context["job"].failure_reason)
            + "\nDataset ID: "
            + str(job_context["dataset"].id)
            + "\n We have been notified and are looking into the problem. \n\nSorry!"
        )

        # URL-quoted so they can be embedded in the issue/mailto links below.
        ERROR_EMAIL_TITLE = quote("I can't download my dataset")
        ERROR_EMAIL_BODY = quote(
            """
        [What browser are you using?]
        [Add details of the issue you are facing]
        ---
        """
            + str(job_context["dataset"].id)
        )

        FORMATTED_HTML = (
            BODY_ERROR_HTML.replace("REPLACE_DATASET_URL", dataset_url)
            .replace("REPLACE_ERROR_TEXT", job_context["job"].failure_reason)
            .replace(
                "REPLACE_NEW_ISSUE",
                "https://github.com/AlexsLemonade/refinebio/issues/new?title={0}&body={1}&labels=bug".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
            .replace(
                "REPLACE_MAILTO",
                "mailto:ccdl@alexslemonade.org?subject={0}&body={1}".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
        )
        job_context["success"] = False
    else:
        # Success email: just link the download page.
        SUBJECT = "Your refine.bio Dataset is Ready!"
        BODY_TEXT = "Hot off the presses:\n\n" + dataset_url + "\n\nLove!,\nThe refine.bio Team"
        FORMATTED_HTML = BODY_HTML.replace("REPLACE_DOWNLOAD_URL", dataset_url).replace(
            "REPLACE_DATASET_URL", dataset_url
        )

    # Create a new SES resource and specify a region.
    client = boto3.client("ses", region_name=AWS_REGION)

    # Provide the contents of the email.
    # NOTE(review): `response` is never used; SES delivery errors surface via
    # the ClientError the caller catches.
    response = client.send_email(
        Destination={"ToAddresses": [RECIPIENT,],},
        Message={
            "Body": {
                "Html": {"Charset": CHARSET, "Data": FORMATTED_HTML,},
                "Text": {"Charset": CHARSET, "Data": BODY_TEXT,},
            },
            "Subject": {"Charset": CHARSET, "Data": SUBJECT,},
        },
        Source=SENDER,
    )
def _update_result_objects(job_context: Dict) -> Dict:
    """Closes out the dataset object: record where the result lives on S3,
    its size and checksum, and flip its processing flags."""
    dataset = job_context["dataset"]
    output_file = job_context["output_file"]

    dataset.s3_bucket = RESULTS_BUCKET
    dataset.s3_key = output_file.split("/")[-1]
    dataset.size_in_bytes = calculate_file_size(output_file)
    dataset.sha1 = calculate_sha1(output_file)
    dataset.is_processing = False
    dataset.is_processed = True
    dataset.is_available = True
    # Results live for a week before expiring.
    dataset.expires_on = timezone.now() + timedelta(days=7)
    dataset.save()

    job_context["success"] = True
    return job_context
def smash(job_id: int, upload=True) -> None:
    """Main Smasher interface: run the full smash pipeline for a job."""
    pipeline = Pipeline(name=PipelineEnum.SMASHER.value)
    steps = [
        utils.start_job,
        smashing_utils.prepare_files,
        _smash_all,
        _upload,
        _update_result_objects,
        utils.end_job,
    ]
    job_context = utils.run_pipeline(
        {"job_id": job_id, "upload": upload, "pipeline": pipeline}, steps
    )
    # Ensure that `_notify` is always called so that users get emails in case
    # processing fails or succeeds.
    return _notify(job_context)
<|code_end|>
workers/data_refinery_workers/processors/smashing_utils.py
<|code_start|># -*- coding: utf-8 -*-
import csv
import logging
import math
import os
import multiprocessing
import shutil
import time
from pathlib import Path
from typing import Dict, List, Tuple
from concurrent.futures import ThreadPoolExecutor
from django.utils import timezone
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
import numpy as np
import pandas as pd
import psutil
import rpy2.robjects as ro
import simplejson as json
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputedFile, Sample
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
MULTIPROCESSING_MAX_THREAD_COUNT = max(1, math.floor(multiprocessing.cpu_count() / 2) - 1)
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
BODY_HTML = (
Path("data_refinery_workers/processors/smasher_email.min.html").read_text().replace("\n", "")
)
BODY_ERROR_HTML = (
Path("data_refinery_workers/processors/smasher_email_error.min.html")
.read_text()
.replace("\n", "")
)
BYTES_IN_GB = 1024 * 1024 * 1024
QN_CHUNK_SIZE = 10000
logger = get_and_configure_logger(__name__)
### DEBUG ###
logger.setLevel(logging.getLevelName("DEBUG"))
def log_state(message, job_id, start_time=False):
    """Log a debug checkpoint with CPU/RAM usage for the given job.

    When called without `start_time`, returns a timestamp suitable for a
    later paired call; when `start_time` is given, also logs the elapsed
    duration. No-op (returns None) unless DEBUG logging is enabled.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return None

    rss_gb = psutil.Process(os.getpid()).memory_info().rss / BYTES_IN_GB
    logger.debug(message, total_cpu=psutil.cpu_percent(), process_ram=rss_gb, job_id=job_id)

    if start_time:
        logger.debug("Duration: %s" % (time.time() - start_time), job_id=job_id)
        return None
    return time.time()
def prepare_files(job_context: Dict) -> Dict:
    """
    Fetches and prepares the files to smash.

    Populates job_context with 'input_files' (per-key lists of
    (ComputedFile, Sample) tuples), 'filtered_samples' (samples excluded
    from the smash, with a reason), 'num_input_files', 'group_by_keys',
    and fresh 'work_dir'/'output_dir' paths. Raises ProcessorJobError if
    no smashable files are found at all.
    """
    start_prepare_files = log_state("start prepare files", job_context["job"].id)
    found_files = False
    job_context["filtered_samples"] = {}
    job_context["input_files"] = {}
    # `key` can either be the species name or experiment accession.
    for key, samples in job_context["samples"].items():
        smashable_files = []
        seen_files = set()
        for sample in samples:
            smashable_file = sample.get_most_recent_smashable_result_file()
            if smashable_file is not None and smashable_file not in seen_files:
                smashable_files = smashable_files + [(smashable_file, sample)]
                seen_files.add(smashable_file)
                found_files = True
            else:
                # NOTE(review): this branch also catches samples whose file
                # was already seen (a duplicate), yet records the "no
                # processed file" reason for them — confirm that's intended.
                sample_metadata = sample.to_metadata_dict()
                job_context["filtered_samples"][sample.accession_code] = {
                    **sample_metadata,
                    "reason": "This sample did not have a processed file associated with it in our database.",
                    "experiment_accession_code": get_experiment_accession(
                        sample.accession_code, job_context["dataset"].data
                    ),
                }

        job_context["input_files"][key] = smashable_files

    job_context["num_input_files"] = len(job_context["input_files"])
    job_context["group_by_keys"] = list(job_context["input_files"].keys())

    if not found_files:
        raise utils.ProcessorJobError(
            "Couldn't get any files to smash for Smash job!!",
            success=False,
            dataset_id=job_context["dataset"].id,
            num_samples=len(job_context["samples"]),
        )

    dataset_id = str(job_context["dataset"].pk)
    job_context["work_dir"] = "/home/user/data_store/smashed/" + dataset_id + "/"
    # Ensure we have a fresh smash directory
    shutil.rmtree(job_context["work_dir"], ignore_errors=True)
    os.makedirs(job_context["work_dir"])

    job_context["output_dir"] = job_context["work_dir"] + "output/"
    os.makedirs(job_context["output_dir"])
    log_state("end prepare files", job_context["job"].id, start_prepare_files)
    return job_context
def _load_and_sanitize_file(computed_file_path) -> pd.DataFrame:
    """ Read and sanitize a computed file """
    data = pd.read_csv(
        computed_file_path,
        sep="\t",
        header=0,
        index_col=0,
        dtype={0: str, 1: np.float32},
        # NOTE(review): `error_bad_lines` is deprecated since pandas 1.3 and
        # removed in 2.0 (replaced by `on_bad_lines="skip"`); fine for the
        # pinned pandas version, but revisit before upgrading.
        error_bad_lines=False,
    )

    # Strip any funky whitespace
    data.columns = data.columns.str.strip()
    data = data.dropna(axis="columns", how="all")

    # Make sure the index type is correct
    data.index = data.index.map(str)

    # Ensure that we don't have any dangling Brainarray-generated probe symbols.
    # BA likes to leave '_at', signifying probe identifiers,
    # on their converted, non-probe identifiers. It makes no sense.
    # So, we chop them off and don't worry about it.
    data.index = data.index.str.replace("_at", "")

    # Remove any lingering Affymetrix control probes ("AFFX-")
    data = data[~data.index.str.contains("AFFX-")]

    # If there are any _versioned_ gene identifiers, remove that
    # version information. We're using the latest brainarray for everything anyway.
    # Jackie says this is okay.
    # She also says that in the future, we may only want to do this
    # for cross-technology smashes.

    # This regex needs to be able to handle EGIDs in the form:
    #       ENSGXXXXYYYZZZZ.6
    # and
    #       fgenesh2_kg.7__3016__AT5G35080.1 (via http://plants.ensembl.org/Arabidopsis_lyrata/ \
    #       Gene/Summary?g=fgenesh2_kg.7__3016__AT5G35080.1;r=7:17949732-17952000;t=fgenesh2_kg. \
    #       7__3016__AT5G35080.1;db=core)
    # NOTE(review): this relies on the old default of regex=True for
    # Series.str.replace; pandas 2.0 flips that default, so pass regex=True
    # explicitly before upgrading pandas.
    data.index = data.index.str.replace(r"(\.[^.]*)$", "")

    # Squish duplicated rows together.
    # XXX/TODO: Is mean the appropriate method here?
    #           We can make this an option in future.
    # Discussion here: https://github.com/AlexsLemonade/refinebio/issues/186#issuecomment-395516419
    data = data.groupby(data.index, sort=False).mean()

    return data
def process_frame(work_dir, computed_file, sample_accession_code, aggregate_by) -> pd.DataFrame:
    """Download the computed file from S3 and try to see if it's smashable.

    Parameters:
        work_dir: job-specific directory the file is synced into.
        computed_file: the ComputedFile to download and sanitize.
        sample_accession_code: accession code used as the single column
            title of the returned frame.
        aggregate_by: the dataset's aggregation level; "SPECIES" triggers
            log2(x + 1) transformation of already-log2-scaled data.

    Returns a single-column DataFrame if the file can be processed, or
    None otherwise. (An older docstring said False; every failure path
    actually returns None.)
    """
    try:
        # Download the file to a job-specific location so it
        # won't disappear while we're using it.
        computed_file_path = computed_file.get_synced_file_path(
            path="%s%s" % (work_dir, computed_file.filename)
        )

        # Bail appropriately if this isn't a real file.
        if not computed_file_path or not os.path.exists(computed_file_path):
            logger.warning(
                "Smasher received non-existent file path.",
                computed_file_path=computed_file_path,
                computed_file_id=computed_file.id,
            )
            return None

        data = _load_and_sanitize_file(computed_file_path)

        if len(data.columns) > 2:
            # Most of the time, >1 is actually bad, but we also need to support
            # two-channel samples. I think ultimately those should be given some kind of
            # special consideration.
            logger.info(
                "Found a frame with more than 2 columns - this shouldn't happen!",
                computed_file_path=computed_file_path,
                computed_file_id=computed_file.id,
            )
            return None

        # via https://github.com/AlexsLemonade/refinebio/issues/330:
        #   aggregating by experiment -> return untransformed output from tximport
        #   aggregating by species -> log2(x + 1) tximport output
        if aggregate_by == "SPECIES" and computed_file.has_been_log2scaled():
            data = data + 1
            data = np.log2(data)

        # Ideally done in the NO-OPPER, but sanity check here.
        # (data.max() > 100).any() is True when any column's maximum
        # exceeds 100, which log2-scaled data should never do.
        if (not computed_file.has_been_log2scaled()) and (data.max() > 100).any():
            logger.info("Detected non-log2 microarray data.", computed_file_id=computed_file.id)
            data = np.log2(data)

        # Explicitly title this dataframe
        try:
            data.columns = [sample_accession_code]
        except ValueError as e:
            # This sample might have multiple channels, or something else.
            # Don't mess with it.
            logger.warn(
                "Smasher found multi-channel column (probably) - skipping!",
                exc_info=1,
                computed_file_path=computed_file_path,
            )
            return None
        except Exception as e:
            # Okay, somebody probably forgot to create a SampleComputedFileAssociation
            # Don't mess with it.
            logger.warn(
                "Smasher found very bad column title - skipping!",
                exc_info=1,
                computed_file_path=computed_file_path,
            )
            return None

    except Exception as e:
        # NOTE(review): if get_synced_file_path itself raises,
        # computed_file_path is unbound here and this log call would raise
        # NameError -- TODO confirm.
        logger.exception("Unable to smash file", file=computed_file_path)
        return None
    # TEMPORARY for iterating on compendia more quickly.
    # finally:
    #     # Delete before archiving the work dir
    #     if computed_file_path and os.path.exists(computed_file_path):
    #         os.remove(computed_file_path)

    return data
def load_first_pass_data_if_cached(work_dir: str):
    """Load the cached first-pass results for this job, if they exist.

    Looks for "first_pass.csv" inside `work_dir`. The cache file holds
    exactly three rows: gene identifiers, microarray column names, and
    RNA-seq column names. Returns a dict with keys "gene_ids",
    "microarray_columns", and "rnaseq_columns", or None when no cache
    file has been written yet.
    """
    cache_path = os.path.join(work_dir, "first_pass.csv")
    try:
        with open(cache_path, newline="") as cache_file:
            row_iter = csv.reader(cache_file)
            # The three cached rows appear in this fixed order.
            return {
                "gene_ids": next(row_iter),
                "microarray_columns": next(row_iter),
                "rnaseq_columns": next(row_iter),
            }
    # A missing file just means the gene ids aren't cached. Any other
    # exception is unexpected and propagates to the caller.
    except FileNotFoundError:
        return None
def cache_first_pass(
    job_context: Dict, gene_ids: List[str], microarray_columns: List[str], rnaseq_columns: List[str]
):
    """Persist the first-pass results to the job's work directory.

    Writes "first_pass.csv" with three rows (gene ids, microarray column
    names, RNA-seq column names) so that a retried job can skip the
    expensive first pass. Failures are logged but never raised -- losing
    the cache is not worth failing the job.
    """
    try:
        cache_path = os.path.join(job_context["work_dir"], "first_pass.csv")
        logger.info(
            "Caching gene_ids, microarray_columns, and rnaseq_columns to %s",
            cache_path,
            job_id=job_context["job"].id,
        )
        with open(cache_path, "w", newline="") as cache_file:
            writer = csv.writer(cache_file)
            for row in (gene_ids, microarray_columns, rnaseq_columns):
                writer.writerow(row)
    # Nothing in the above try should raise an exception, but if it
    # does don't waste the work we did in the first pass.
    except Exception:
        logger.exception(
            "Error writing gene identifiers to CSV file.", job_id=job_context["job"].id
        )
def process_frames_for_key(
    key: str, input_files: List[Tuple[ComputedFile, Sample]], job_context: Dict
) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in `input_files`.

    Will add to job_context the keys 'microarray_matrix' and
    'rnaseq_matrix' with pandas dataframes containing all of the
    samples' data. Also adds the key 'unsmashable_files' containing a
    list of paths that were determined to be unsmashable.

    Two passes are made over input_files: the first collects gene
    identifiers and column names so the matrices can be preallocated at
    exactly the right size; the second fills the matrices in. The first
    pass's results may be loaded from, or written to, a cache file in the
    work directory.
    """
    start_gene_ids = log_state(
        "Collecting all gene identifiers for key {}".format(key), job_context["job"].id
    )

    # Build up a list of gene identifiers because these will be the
    # rows of our matrices, and we want to preallocate them so we need
    # to know them all.

    ## We may have built this list in a previous job, check to see if it's cached:
    cached_data = load_first_pass_data_if_cached(job_context["work_dir"])
    first_pass_was_cached = False

    if cached_data:
        logger.info(
            (
                "The data from the first pass was cached, so we're using "
                "that and skipping the first pass."
            ),
            job_id=job_context["job"].id,
        )
        first_pass_was_cached = True
        all_gene_identifiers = cached_data["gene_ids"]
        microarray_columns = cached_data["microarray_columns"]
        rnaseq_columns = cached_data["rnaseq_columns"]
    else:
        # First pass: count gene identifier occurrences and collect the
        # column (sample) names per technology.
        gene_identifier_counts = {}
        microarray_columns = []
        rnaseq_columns = []
        for index, (computed_file, sample) in enumerate(input_files):
            log_state("1st processing frame {}".format(index), job_context["job"].id)
            frame_data = process_frame(
                job_context["work_dir"],
                computed_file,
                sample.accession_code,
                job_context["dataset"].aggregate_by,
            )
            if frame_data is None:
                # we were unable to process this sample, so we drop
                logger.warning(
                    "Unable to smash file",
                    computed_file=computed_file.id,
                    dataset_id=job_context["dataset"].id,
                    job_id=job_context["job"].id,
                )
                sample_metadata = sample.to_metadata_dict()
                job_context["filtered_samples"][sample.accession_code] = {
                    **sample_metadata,
                    "reason": "The file associated with this sample did not pass the QC checks we apply before aggregating.",
                    "filename": computed_file.filename,
                    "experiment_accession_code": get_experiment_accession(
                        sample.accession_code, job_context["dataset"].data
                    ),
                }
                continue

            # Count how many frames are in each tech so we can preallocate
            # the matrices in both directions.
            for gene_id in frame_data.index:
                if gene_id in gene_identifier_counts:
                    gene_identifier_counts[gene_id] += 1
                else:
                    gene_identifier_counts[gene_id] = 1

            # Each dataframe should only have 1 column, but it's
            # returned as a list so use extend.
            if sample.technology == "MICROARRAY":
                microarray_columns.extend(frame_data.columns)
            elif sample.technology == "RNA-SEQ":
                rnaseq_columns.extend(frame_data.columns)

        # We only want to use gene identifiers which are present
        # in >50% of the samples. We're doing this because a large
        # number of gene identifiers present in only a modest
        # number of experiments have leaked through. We wouldn't
        # necessarily want to do this if we'd mapped all the data
        # to ENSEMBL identifiers successfully.
        total_samples = len(microarray_columns) + len(rnaseq_columns)
        all_gene_identifiers = [
            gene_id
            for gene_id in gene_identifier_counts
            if gene_identifier_counts[gene_id] > (total_samples * 0.5)
        ]
        all_gene_identifiers.sort()

        # Free the counts before allocating the big matrices below.
        del gene_identifier_counts

        log_template = (
            "Collected {0} gene identifiers for {1} across"
            " {2} micrarry samples and {3} RNA-Seq samples."
        )
        log_state(
            log_template.format(
                len(all_gene_identifiers), key, len(microarray_columns), len(rnaseq_columns)
            ),
            job_context["job"].id,
            start_gene_ids,
        )

    # Temporarily only cache mouse compendia because it may not succeed.
    if not first_pass_was_cached and key == "MUS_MUSCULUS":
        cache_first_pass(job_context, all_gene_identifiers, microarray_columns, rnaseq_columns)

    start_build_matrix = log_state("Beginning to build the full matrices.", job_context["job"].id)

    # Sort the columns so that the matrices are in predictable orders.
    microarray_columns.sort()
    rnaseq_columns.sort()

    # Preallocate the matrices to be the exact size we will need. This
    # should prevent any operations from happening while we build it
    # up, so the only RAM used will be needed.
    job_context["microarray_matrix"] = pd.DataFrame(
        data=None, index=all_gene_identifiers, columns=microarray_columns, dtype=np.float32
    )
    job_context["rnaseq_matrix"] = pd.DataFrame(
        data=None, index=all_gene_identifiers, columns=rnaseq_columns, dtype=np.float32
    )

    # Second pass: re-process every frame and copy its single column into
    # the preallocated matrix for its technology.
    for index, (computed_file, sample) in enumerate(input_files):
        log_state("2nd processing frame {}".format(index), job_context["job"].id)
        frame_data = process_frame(
            job_context["work_dir"],
            computed_file,
            sample.accession_code,
            job_context["dataset"].aggregate_by,
        )
        if frame_data is None:
            job_context["unsmashable_files"].append(computed_file.filename)
            sample_metadata = sample.to_metadata_dict()
            job_context["filtered_samples"][sample.accession_code] = {
                **sample_metadata,
                "reason": "The file associated with this sample did not contain a vector that fit the expected dimensions of the matrix.",
                "filename": computed_file.filename,
                "experiment_accession_code": get_experiment_accession(
                    sample.accession_code, job_context["dataset"].data
                ),
            }
            continue

        # Align the frame's rows with the chosen gene identifiers; genes
        # the frame lacks become NaN, extra genes are dropped.
        frame_data = frame_data.reindex(all_gene_identifiers)

        # The dataframe for each sample will only have one column
        # whose header will be the accession code.
        column = frame_data.columns[0]
        if sample.technology == "MICROARRAY":
            job_context["microarray_matrix"][column] = frame_data.values
        elif sample.technology == "RNA-SEQ":
            job_context["rnaseq_matrix"][column] = frame_data.values

    job_context["num_samples"] = 0
    if job_context["microarray_matrix"] is not None:
        job_context["num_samples"] += len(job_context["microarray_matrix"].columns)
    if job_context["rnaseq_matrix"] is not None:
        job_context["num_samples"] += len(job_context["rnaseq_matrix"].columns)

    log_state(
        "Built full matrices for key {}".format(key), job_context["job"].id, start_build_matrix
    )

    return job_context
# Modified from: http://yaoyao.codes/pandas/2018/01/23/pandas-split-a-dataframe-into-chunks
def _index_marks(num_columns, chunk_size):
return range(chunk_size, math.ceil(num_columns / chunk_size) * chunk_size, chunk_size)
def _split_dataframe_columns(dataframe, chunk_size):
    """Split `dataframe` column-wise into frames of at most `chunk_size` columns."""
    num_columns = dataframe.shape[1]
    cut_points = _index_marks(num_columns, chunk_size)
    return np.split(dataframe, cut_points, axis=1)
def _quantile_normalize_matrix(target_vector, original_matrix):
    """Quantile normalize `original_matrix` against `target_vector`.

    Uses R's `preprocessCore` package via rpy2. Matrices wider than
    QN_CHUNK_SIZE columns are normalized in column chunks to bound
    memory use. Returns a DataFrame with the same columns and index.
    """
    preprocessCore = importr("preprocessCore")
    as_numeric = rlang("as.numeric")
    data_matrix = rlang("data.matrix")

    # Convert the smashed frames to an R numeric Matrix
    target_vector = as_numeric(target_vector)

    # Do so in chunks if the matrix is too large.
    if original_matrix.shape[1] <= QN_CHUNK_SIZE:
        merged_matrix = data_matrix(original_matrix)
        normalized_matrix = preprocessCore.normalize_quantiles_use_target(
            x=merged_matrix, target=target_vector, copy=True
        )
        # And finally convert back to Pandas
        ar = np.array(normalized_matrix)
        new_merged = pd.DataFrame(ar, columns=original_matrix.columns, index=original_matrix.index)
    else:
        matrix_chunks = _split_dataframe_columns(original_matrix, QN_CHUNK_SIZE)
        for i, chunk in enumerate(matrix_chunks):
            R_chunk = data_matrix(chunk)
            normalized_chunk = preprocessCore.normalize_quantiles_use_target(
                x=R_chunk, target=target_vector, copy=True
            )
            ar = np.array(normalized_chunk)
            # Write the normalized values back over the original columns.
            # NOTE(review): this path mutates the caller's matrix in
            # place, while the single-shot path above returns a new frame.
            start_column = i * QN_CHUNK_SIZE
            end_column = (i + 1) * QN_CHUNK_SIZE
            original_matrix.iloc[:, start_column:end_column] = ar
        new_merged = original_matrix

    return new_merged
def _test_qn(merged_matrix):
    """Select up to ~100 random pairs of columns and run the KS test on them.

    Returns a list of (statistic, pvalue) tuples from R's `ks.test`, or
    None when the matrix has fewer than two columns.
    """
    # Verify this QN, related:
    # https://github.com/AlexsLemonade/refinebio/issues/599#issuecomment-422132009
    data_matrix = rlang("data.matrix")
    as_numeric = rlang("as.numeric")
    set_seed = rlang("set.seed")
    combn = rlang("combn")
    ncol = rlang("ncol")
    ks_test = rlang("ks.test")
    which = rlang("which")

    merged_R_matrix = data_matrix(merged_matrix)

    # Fixed seed so the selected column pairs are reproducible.
    set_seed(123)

    n = ncol(merged_R_matrix)[0]
    m = 2

    # Not enough columns to perform KS test - either bad smash or single sample smash.
    if n < m:
        return None

    # This wont work with larger matricies
    # https://github.com/AlexsLemonade/refinebio/issues/1860
    ncolumns = ncol(merged_R_matrix)

    if ncolumns[0] <= 200:
        # Convert to NP, Shuffle, Return to R
        combos = combn(ncolumns, 2)
        ar = np.array(combos)
        np.random.shuffle(np.transpose(ar))
    else:
        # Enumerating all pairs would be too expensive for wide
        # matrices; pair up 100 randomly chosen distinct columns instead.
        indexes = [*range(ncolumns[0])]
        np.random.shuffle(indexes)
        ar = np.array([*zip(indexes[0:100], indexes[100:200])])
        nr, nc = ar.shape
        combos = ro.r.matrix(ar, nrow=nr, ncol=nc)

    result = []
    # adapted from
    # https://stackoverflow.com/questions/9661469/r-t-test-over-all-columns
    # apply KS test to randomly selected pairs of columns (samples)
    # NOTE(review): range(1, min(ncol, 100)) stops one short of
    # min(ncol, 100), so the last candidate pair is never tested --
    # TODO confirm the off-by-one is intended (R columns are 1-based).
    for i in range(1, min(ncol(combos)[0], 100)):
        value1 = combos.rx(1, i)[0]
        value2 = combos.rx(2, i)[0]
        test_a = merged_R_matrix.rx(True, value1)
        test_b = merged_R_matrix.rx(True, value2)

        # RNA-seq has a lot of zeroes in it, which
        # breaks the ks_test. Therefore we want to
        # filter them out. To do this we drop the
        # lowest half of the values. If there's
        # still zeroes in there, then that's
        # probably too many zeroes so it's okay to
        # fail.
        median_a = np.median(test_a)
        median_b = np.median(test_b)

        # `which` returns indices which are
        # 1-indexed. Python accesses lists with
        # zero-indexes, even if that list is
        # actually an R vector. Therefore subtract
        # 1 to account for the difference.
        test_a = [test_a[i - 1] for i in which(test_a > median_a)]
        test_b = [test_b[i - 1] for i in which(test_b > median_b)]

        # The python list comprehension gives us a
        # python list, but ks_test wants an R
        # vector so let's go back.
        test_a = as_numeric(test_a)
        test_b = as_numeric(test_b)

        ks_res = ks_test(test_a, test_b)
        statistic = ks_res.rx("statistic")[0][0]
        pvalue = ks_res.rx("p.value")[0][0]

        result.append((statistic, pvalue))

    return result
def quantile_normalize(job_context: Dict, ks_check=True, ks_stat=0.001) -> Dict:
    """Apply quantile normalization to job_context["merged_no_qn"].

    Pops "merged_no_qn" from job_context and adds "merged_qn". May also
    add "ks_statistic", "ks_pvalue", and "ks_warning" when the KS sanity
    check runs. Raises utils.ProcessorJobError when the organism has no
    QN target.
    """
    # Prepare our QN target file
    organism = job_context["organism"]

    if not organism.qn_target:
        raise utils.ProcessorJobError(
            "Could not find QN target for Organism: " + str(organism),
            success=False,
            organism=organism,
            dataset_id=job_context["dataset"].id,
        )

    qn_target_path = organism.qn_target.computedfile_set.latest().sync_from_s3()
    qn_target_frame = pd.read_csv(
        qn_target_path, sep="\t", header=None, index_col=None, error_bad_lines=False
    )

    # Prepare our RPy2 bridge
    pandas2ri.activate()

    # Remove un-quantiled normalized matrix from job_context
    # because we no longer need it.
    merged_no_qn = job_context.pop("merged_no_qn")

    # Perform the Actual QN
    new_merged = _quantile_normalize_matrix(qn_target_frame[0], merged_no_qn)

    # And add the quantile normalized matrix to job_context.
    job_context["merged_qn"] = new_merged

    # For now, don't test the QN for mouse/human. This never fails on
    # smasher jobs and is OOM-killing our very large compendia
    # jobs. Let's run this manually after we have a compendia job
    # actually finish.
    if organism.name in ["MUS_MUSCULUS", "HOMO_SAPIENS"]:
        return job_context

    ks_res = _test_qn(new_merged)
    if ks_res:
        # NOTE(review): each iteration overwrites ks_statistic/ks_pvalue,
        # so only the last tested pair's values survive in job_context.
        for (statistic, pvalue) in ks_res:
            job_context["ks_statistic"] = statistic
            job_context["ks_pvalue"] = pvalue

            # We're unsure of how strigent to be about
            # the pvalue just yet, so we're extra lax
            # rather than failing tons of tests. This may need tuning.
            if ks_check and (statistic > ks_stat or pvalue < 0.8):
                job_context["ks_warning"] = (
                    "Failed Kolmogorov Smirnov test! Stat: "
                    + str(statistic)
                    + ", PVal: "
                    + str(pvalue)
                )
    else:
        logger.warning(
            "Not enough columns to perform KS test - either bad smash or single sample smash.",
            dataset_id=job_context["dataset"].id,
        )

    return job_context
def compile_metadata(job_context: Dict) -> Dict:
    """Compile metadata about the job.

    Returns a new dict containing the metadata, not the job_context.
    Samples that were filtered out of the dataset are excluded both from
    the "samples" mapping and from each experiment's sample list.
    """
    dataset = job_context["dataset"]
    filtered_samples = job_context["filtered_samples"]

    metadata = {
        "num_samples": job_context["num_samples"],
        "num_experiments": job_context["experiments"].count(),
        "quant_sf_only": dataset.quant_sf_only,
    }

    # Aggregation / normalization details only apply to non-quant datasets.
    if not dataset.quant_sf_only:
        metadata["aggregate_by"] = dataset.aggregate_by
        metadata["scale_by"] = dataset.scale_by
        # https://github.com/AlexsLemonade/refinebio/pull/421#discussion_r203799646
        # TODO: do something with these.
        # metadata['non_aggregated_files'] = job_context["unsmashable_files"]
        metadata["ks_statistic"] = job_context.get("ks_statistic", None)
        metadata["ks_pvalue"] = job_context.get("ks_pvalue", None)
        metadata["ks_warning"] = job_context.get("ks_warning", None)
        metadata["quantile_normalized"] = dataset.quantile_normalize

    # Per-sample metadata, skipping the samples that were filtered.
    metadata["samples"] = {
        sample.accession_code: sample.to_metadata_dict()
        for sample in dataset.get_samples()
        if sample.accession_code not in filtered_samples
    }

    # Per-experiment metadata, with filtered samples removed from each
    # experiment's accession-code list.
    experiments = {}
    for experiment in dataset.get_experiments():
        experiment_metadata = experiment.to_metadata_dict()
        experiment_metadata["sample_accession_codes"] = [
            code
            for code in experiment_metadata["sample_accession_codes"]
            if code not in filtered_samples
        ]
        experiments[experiment.accession_code] = experiment_metadata
    metadata["experiments"] = experiments

    return metadata
def write_non_data_files(job_context: Dict) -> Dict:
    """Writes the files that are not the actual data of the dataset.

    This include LICENSE.txt and README.md files and the metadata.

    Adds the key `metadata` to job_context and populates it with all
    the metadata that needs to be written.

    Raises utils.ProcessorJobError if any metadata file cannot be written.
    """
    job_context["metadata"] = compile_metadata(job_context)

    # The README and LICENSE ship alongside every dataset download.
    shutil.copy("README_DATASET.md", job_context["output_dir"] + "README.md")
    shutil.copy("LICENSE_DATASET.txt", job_context["output_dir"] + "LICENSE.TXT")

    # Write samples metadata to TSV
    try:
        write_tsv_json(job_context)
        # Metadata to JSON
        job_context["metadata"]["created_at"] = timezone.now().strftime("%Y-%m-%dT%H:%M:%S")
        aggregated_metadata_path = os.path.join(
            job_context["output_dir"], "aggregated_metadata.json"
        )
        with open(aggregated_metadata_path, "w", encoding="utf-8") as metadata_file:
            json.dump(job_context["metadata"], metadata_file, indent=4, sort_keys=True)

        if job_context["filtered_samples"]:
            # generate filtered samples file only if some samples were skipped
            filtered_samples_path = os.path.join(
                job_context["output_dir"], "filtered_samples_metadata.json"
            )
            with open(filtered_samples_path, "w", encoding="utf-8") as metadata_file:
                json.dump(job_context["filtered_samples"], metadata_file, indent=4, sort_keys=True)

            columns = get_tsv_columns(job_context["filtered_samples"])
            filtered_samples_tsv_path = os.path.join(
                job_context["output_dir"], "filtered_samples_metadata.tsv"
            )
            with open(filtered_samples_tsv_path, "w", encoding="utf-8") as tsv_file:
                dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
                dw.writeheader()
                for sample_metadata in job_context["filtered_samples"].values():
                    dw.writerow(get_tsv_row_data(sample_metadata, job_context["dataset"].data))
    except Exception as e:
        # NOTE(review): this collapses any failure above into one message;
        # the original exception remains attached as __context__.
        raise utils.ProcessorJobError("Failed to write metadata TSV!", success=False)

    return job_context
def get_experiment_accession(sample_accession_code, dataset_data):
    """Return the accession of the experiment containing the given sample.

    `dataset_data` maps experiment accession codes to lists of sample
    accession codes. Returns "" when no experiment contains the sample --
    which should never happen, because the sample is by definition in
    the dataset.
    """
    matches = (
        experiment_accession
        for experiment_accession, sample_codes in dataset_data.items()
        if sample_accession_code in sample_codes
    )
    return next(matches, "")
def _add_annotation_column(annotation_columns, column_name):
"""Add annotation column names in place.
Any column_name that starts with "refinebio_" will be skipped.
"""
if not column_name.startswith("refinebio_"):
annotation_columns.add(column_name)
def _add_annotation_value(row_data, col_name, col_value, sample_accession_code):
"""Adds a new `col_name` key whose value is `col_value` to row_data.
If col_name already exists in row_data with different value, print
out a warning message.
"""
# Generate a warning message if annotation field name starts with
# "refinebio_". This should rarely (if ever) happen.
if col_name.startswith("refinebio_"):
logger.warning(
"Annotation value skipped",
annotation_field=col_name,
annotation_value=col_value,
sample_accession_code=sample_accession_code,
)
elif col_name not in row_data:
row_data[col_name] = col_value
# Generate a warning message in case of conflicts of annotation values.
# (Requested by Dr. Jackie Taroni)
elif row_data[col_name] != col_value:
logger.warning(
"Conflict of values found in column %s: %s vs. %s"
% (col_name, row_data[col_name], col_value),
sample_accession_code=sample_accession_code,
)
def get_tsv_row_data(sample_metadata, dataset_data):
    """Returns field values based on input sample_metadata.

    Some annotation fields are treated specially because they are more
    important.  See `get_tsv_columns` function above for details.

    refinebio_* fields are copied through as-is; the entries of
    "refinebio_annotations" are decomposed per source database and merged
    in via _add_annotation_value. An "experiment_accession" field is
    appended at the end.
    """
    sample_accession_code = sample_metadata.get("refinebio_accession_code", "")
    row_data = dict()
    for meta_key, meta_value in sample_metadata.items():
        # If the field is a refinebio-specific field, simply copy it.
        if meta_key != "refinebio_annotations":
            row_data[meta_key] = meta_value
            continue

        # Decompose sample_metadata["refinebio_annotations"], which is
        # an array of annotations.
        for annotation in meta_value:
            for annotation_key, annotation_value in annotation.items():
                # "characteristic" in ArrayExpress annotation
                if (
                    sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                    and annotation_key == "characteristic"
                ):
                    for pair_dict in annotation_value:
                        if "category" in pair_dict and "value" in pair_dict:
                            col_name, col_value = pair_dict["category"], pair_dict["value"]
                            _add_annotation_value(
                                row_data, col_name, col_value, sample_accession_code
                            )
                # "variable" in ArrayExpress annotation
                elif (
                    sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                    and annotation_key == "variable"
                ):
                    for pair_dict in annotation_value:
                        if "name" in pair_dict and "value" in pair_dict:
                            col_name, col_value = pair_dict["name"], pair_dict["value"]
                            _add_annotation_value(
                                row_data, col_name, col_value, sample_accession_code
                            )
                # Skip "source" field ArrayExpress sample's annotation
                elif (
                    sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                    and annotation_key == "source"
                ):
                    continue
                # "characteristics_ch1" in GEO annotation
                elif (
                    sample_metadata.get("refinebio_source_database", "") == "GEO"
                    and annotation_key == "characteristics_ch1"
                ):  # array of strings
                    for pair_str in annotation_value:
                        if ":" in pair_str:
                            col_name, col_value = pair_str.split(":", 1)
                            col_value = col_value.strip()
                            _add_annotation_value(
                                row_data, col_name, col_value, sample_accession_code
                            )
                # If annotation_value includes only a 'name' key, extract its value directly:
                elif (
                    isinstance(annotation_value, dict)
                    and len(annotation_value) == 1
                    and "name" in annotation_value
                ):
                    _add_annotation_value(
                        row_data, annotation_key, annotation_value["name"], sample_accession_code
                    )
                # If annotation_value is a single-element array, extract the element directly:
                elif isinstance(annotation_value, list) and len(annotation_value) == 1:
                    _add_annotation_value(
                        row_data, annotation_key, annotation_value[0], sample_accession_code
                    )
                # Otherwise save all annotation fields in separate columns
                else:
                    _add_annotation_value(
                        row_data, annotation_key, annotation_value, sample_accession_code
                    )

    row_data["experiment_accession"] = get_experiment_accession(sample_accession_code, dataset_data)

    return row_data
def get_tsv_columns(samples_metadata):
    """Returns an array of strings that will be written as a TSV file's
    header. The columns are based on fields found in samples_metadata.

    Some nested annotation fields are taken out as separate columns
    because they are more important than the others.

    Mirrors the decomposition logic of get_tsv_row_data so that every
    value that function can emit has a matching column.
    """
    refinebio_columns = set()
    annotation_columns = set()
    for sample_metadata in samples_metadata.values():
        for meta_key, meta_value in sample_metadata.items():
            if meta_key != "refinebio_annotations":
                refinebio_columns.add(meta_key)
                continue

            # Decompose sample_metadata["annotations"], which is an array of annotations!
            for annotation in meta_value:
                for annotation_key, annotation_value in annotation.items():
                    # For ArrayExpress samples, take out the fields
                    # nested in "characteristic" as separate columns.
                    if (
                        sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                        and annotation_key == "characteristic"
                    ):
                        for pair_dict in annotation_value:
                            if "category" in pair_dict and "value" in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict["category"])
                    # For ArrayExpress samples, also take out the fields
                    # nested in "variable" as separate columns.
                    elif (
                        sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                        and annotation_key == "variable"
                    ):
                        for pair_dict in annotation_value:
                            if "name" in pair_dict and "value" in pair_dict:
                                _add_annotation_column(annotation_columns, pair_dict["name"])
                    # For ArrayExpress samples, skip "source" field
                    elif (
                        sample_metadata.get("refinebio_source_database", "") == "ARRAY_EXPRESS"
                        and annotation_key == "source"
                    ):
                        continue
                    # For GEO samples, take out the fields nested in
                    # "characteristics_ch1" as separate columns.
                    elif (
                        sample_metadata.get("refinebio_source_database", "") == "GEO"
                        and annotation_key == "characteristics_ch1"
                    ):  # array of strings
                        for pair_str in annotation_value:
                            if ":" in pair_str:
                                tokens = pair_str.split(":", 1)
                                _add_annotation_column(annotation_columns, tokens[0])
                    # Saves all other annotation fields in separate columns
                    else:
                        _add_annotation_column(annotation_columns, annotation_key)

    # Return sorted columns, in which "refinebio_accession_code" and "experiment_accession" are
    # always first, followed by the other refinebio columns (in alphabetic order), and
    # annotation columns (in alphabetic order) at the end.
    refinebio_columns.discard("refinebio_accession_code")
    return (
        ["refinebio_accession_code", "experiment_accession"]
        + sorted(refinebio_columns)
        + sorted(annotation_columns)
    )
def write_tsv_json(job_context):
    """Writes tsv files on disk.

    If the dataset is aggregated by species, also write species-level
    JSON file.

    Returns the list of TSV file paths written. Layout depends on the
    dataset's aggregate_by: one TSV per experiment, one TSV (+ JSON) per
    species, or a single ALL/metadata_ALL.tsv.
    """
    # Avoid pulling this out of job_context repeatedly.
    metadata = job_context["metadata"]

    # Uniform TSV header per dataset
    columns = get_tsv_columns(metadata["samples"])

    # Per-Experiment Metadata
    if job_context["dataset"].aggregate_by == "EXPERIMENT":
        tsv_paths = []
        for experiment_title, experiment_data in metadata["experiments"].items():
            experiment_dir = job_context["output_dir"] + experiment_title + "/"
            # Paths are forced to ASCII bytes; non-ASCII characters in
            # the experiment title are silently dropped.
            experiment_dir = experiment_dir.encode("ascii", "ignore")
            os.makedirs(experiment_dir, exist_ok=True)
            tsv_path = experiment_dir.decode("utf-8") + "metadata_" + experiment_title + ".tsv"
            tsv_path = tsv_path.encode("ascii", "ignore")
            tsv_paths.append(tsv_path)
            with open(tsv_path, "w", encoding="utf-8") as tsv_file:
                dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
                dw.writeheader()
                for sample_accession_code, sample_metadata in metadata["samples"].items():
                    if sample_accession_code in experiment_data["sample_accession_codes"]:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
        return tsv_paths
    # Per-Species Metadata
    elif job_context["dataset"].aggregate_by == "SPECIES":
        tsv_paths = []
        for species in job_context["group_by_keys"]:
            species_dir = job_context["output_dir"] + species + "/"
            os.makedirs(species_dir, exist_ok=True)
            samples_in_species = []
            tsv_path = species_dir + "metadata_" + species + ".tsv"
            tsv_paths.append(tsv_path)
            with open(tsv_path, "w", encoding="utf-8") as tsv_file:
                # See http://www.lucainvernizzi.net/blog/2015/08/03/8x-speed-up-for-python-s-csv-dictwriter/
                # about extrasaction.
                dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
                dw.writeheader()
                i = 0
                for sample_metadata in metadata["samples"].values():
                    if sample_metadata.get("refinebio_organism", "") == species:
                        row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                        dw.writerow(row_data)
                        samples_in_species.append(sample_metadata)

                    # Progress is counted over all samples scanned, not
                    # only the ones belonging to this species.
                    i = i + 1
                    if i % 1000 == 0:
                        progress_template = (
                            "Done with {0} out of {1} lines of metadata " "for species {2}"
                        )
                        log_state(
                            progress_template.format(i, len(metadata["samples"]), species),
                            job_context["job"].id,
                        )

            # Writes a json file for current species:
            if len(samples_in_species):
                species_metadata = {"species": species, "samples": samples_in_species}
                json_path = species_dir + "metadata_" + species + ".json"
                with open(json_path, "w", encoding="utf-8") as json_file:
                    json.dump(species_metadata, json_file, indent=4, sort_keys=True)
        return tsv_paths
    # All Metadata
    else:
        all_dir = job_context["output_dir"] + "ALL/"
        os.makedirs(all_dir, exist_ok=True)
        tsv_path = all_dir + "metadata_ALL.tsv"
        with open(tsv_path, "w", encoding="utf-8") as tsv_file:
            dw = csv.DictWriter(tsv_file, columns, delimiter="\t", extrasaction="ignore")
            dw.writeheader()
            for sample_metadata in metadata["samples"].values():
                row_data = get_tsv_row_data(sample_metadata, job_context["dataset"].data)
                dw.writerow(row_data)
        return [tsv_path]
def download_computed_file(download_tuple: Tuple[ComputedFile, str]):
    """Download the latest computed file to the given path.

    `download_tuple` is (computed_file, output_file_path). This is used
    to parallelize downloading quant.sf files via an executor, so a
    failure to sync one file is logged rather than raised.
    """
    (latest_computed_file, output_file_path) = download_tuple
    try:
        latest_computed_file.get_synced_file_path(path=output_file_path)
    # BUGFIX: this was a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and could keep a worker from shutting
    # down. We still don't want one bad quant.sf sync to fail the job,
    # so any ordinary Exception is logged and suppressed.
    except Exception:
        logger.exception("Failed to sync computed file", computed_file_id=latest_computed_file.pk)
def sync_quant_files(output_path, samples: List[Sample]):
    """Copy each sample's latest quant.sf ComputedFile into `output_path`.

    Returns the total number of samples whose quant.sf file was found
    and queued for download.
    """
    num_samples = 0
    page_size = 100
    # For each sample we need its latest quant.sf file. We don't want to
    # query the db for all of them at once, so we look them up in pages
    # of 100 and download each page's computed files in parallel.
    with ThreadPoolExecutor(max_workers=MULTIPROCESSING_MAX_THREAD_COUNT) as executor:
        for page_start in range(0, len(samples), page_size):
            # BUGFIX: this slice used to be
            # `samples[i * page_size : i + page_size]`. Because `i`
            # already steps by page_size, i * page_size overshoots the
            # list (e.g. samples[10000:200] for the second page), so
            # every sample past the first 100 was silently skipped.
            sample_page = samples[page_start : page_start + page_size]

            sample_and_computed_files = []
            for sample in sample_page:
                latest_computed_file = sample.get_most_recent_quant_sf_file()
                if not latest_computed_file:
                    continue
                output_file_path = output_path + sample.accession_code + "_quant.sf"
                sample_and_computed_files.append((latest_computed_file, output_file_path))

            # Download this page's files; this will take a few seconds,
            # which should also help the db recover.
            executor.map(download_computed_file, sample_and_computed_files)
            num_samples += len(sample_and_computed_files)

    return num_samples
<|code_end|>
workers/data_refinery_workers/processors/utils.py
<|code_start|>import os
import random
import shutil
import signal
import string
import subprocess
import sys
import yaml
import pickle
from django.conf import settings
from django.utils import timezone
from typing import List, Dict, Callable
from data_refinery_common.job_lookup import ProcessorEnum, ProcessorPipeline, SMASHER_JOB_TYPES
from data_refinery_common.job_management import create_downloader_job
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
Dataset,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
OriginalFile,
OriginalFileSampleAssociation,
Pipeline,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
Sample,
)
from data_refinery_common.utils import (
get_env_variable,
get_env_variable_gracefully,
get_instance_id,
)
logger = get_and_configure_logger(__name__)
# Let this fail if SYSTEM_VERSION is unset.
SYSTEM_VERSION = get_env_variable("SYSTEM_VERSION")
# S3 buckets; both default to "data-refinery" when the env vars are unset.
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
S3_QN_TARGET_BUCKET_NAME = get_env_variable("S3_QN_TARGET_BUCKET_NAME", "data-refinery")
# Absolute directory containing this module.
DIRNAME = os.path.dirname(os.path.abspath(__file__))
# The job currently being worked on, if any. Read by signal_handler so a
# SIGTERM/SIGINT can mark the in-flight job as failed before exiting.
CURRENT_JOB = None
def signal_handler(sig, frame):
    """Handle SIGTERM/SIGINT by recording the interruption on the in-flight job.

    If a job is currently running, it is marked unsuccessful (without
    consuming a retry) before the process exits.
    """
    global CURRENT_JOB
    if not CURRENT_JOB:
        sys.exit(0)

    CURRENT_JOB.success = False
    CURRENT_JOB.end_time = timezone.now()
    CURRENT_JOB.num_retries -= 1
    CURRENT_JOB.failure_reason = "Interruped by SIGTERM/SIGINT: " + str(sig)
    CURRENT_JOB.save()
    sys.exit(0)
def prepare_original_files(job_context):
    """Provision the job context for OriginalFile-driven processors.

    Looks up the job's OriginalFiles, verifies each is present on disk,
    and populates job_context with 'original_files', 'samples' and an
    empty 'computed_files' list.

    Raises ProcessorJobError if the job has no files, or if any file is
    missing from disk (in which case replacement downloader jobs are
    created and this job is aborted without retry).
    """
    job = job_context["job"]
    original_files = job.original_files.all()

    if original_files.count() == 0:
        raise ProcessorJobError("No files were found for the job.", success=False)

    # Collect every file that should be on disk but is not.
    undownloaded_files = set()
    for original_file in original_files:
        if original_file.needs_downloading(job_context["job_id"]):
            if original_file.is_downloaded:
                # If it needs to be downloaded then it's not
                # downloaded and the is_downloaded field should stop
                # lying about that.
                original_file.is_downloaded = False
                original_file.save()

            undownloaded_files.add(original_file)

    if undownloaded_files:
        logger.info(
            (
                "One or more files found which were missing or not downloaded."
                " Creating downloader jobs for them and deleting this job."
            ),
            processor_job=job.id,
            missing_files=list(undownloaded_files),
        )

        # force=True so the files get re-fetched even though downloader
        # jobs for them may already exist.
        was_job_created = create_downloader_job(
            undownloaded_files, processor_job_id=job_context["job_id"], force=True
        )
        if not was_job_created:
            raise ProcessorJobError(
                "Missing file for processor job but unable to recreate downloader jobs!",
                success=False,
            )

        raise ProcessorJobError(
            "We can not process the data because it is not on the disk",
            success=False,
            no_retry=True,  # this job should not be retried again
            abort=True,  # abort the job and don't do anything else
            undownloaded_files=[file.id for file in undownloaded_files],
        )

    job_context["original_files"] = original_files
    first_original_file = original_files.first()
    # NOTE(review): samples are derived from the *first* original file only;
    # presumably all of the job's files belong to the same sample set — confirm.
    samples = Sample.objects.filter(original_files=first_original_file)
    job_context["samples"] = samples
    job_context["computed_files"] = []

    return job_context
def prepare_dataset(job_context):
    """Provision the job context for Dataset-driven (smasher-type) processors.

    Ensures the job is associated with exactly one Dataset, flags it as
    processing, and seeds job_context with the dataset, its samples and
    experiments, plus empty 'original_files'/'computed_files' lists.

    Raises ProcessorJobError (no retry) when the job has zero or more
    than one dataset attached.
    """
    job = job_context["job"]
    attached_datasets = job.datasets.all()
    dataset_count = attached_datasets.count()

    # A processor job must be tied to exactly one dataset.
    if dataset_count > 1:
        raise ProcessorJobError(
            "More than one dataset for processor job!", success=False, no_retry=True
        )
    if dataset_count == 0:
        raise ProcessorJobError(
            "No datasets found for processor job!", success=False, no_retry=True
        )

    dataset = attached_datasets.first()
    dataset.is_processing = True
    dataset.save()

    # Gather everything the smasher-type pipelines will need.
    job_context["dataset"] = dataset
    job_context["samples"] = dataset.get_aggregated_samples()
    job_context["experiments"] = dataset.get_experiments()

    # Just in case
    job_context["original_files"] = []
    job_context["computed_files"] = []
    return job_context
def start_job(job_context: Dict):
    """A processor function to start jobs.

    Records in the database that this job is being started, installs
    SIGTERM/SIGINT handlers so interruptions are recorded, and prepares
    the job context (original files or dataset) for the rest of the
    pipeline.
    """
    job = job_context["job"]
    original_file = job.original_files.first()

    # If the sample already has a good computed file there is nothing to do
    # (tximport jobs are exempt since they re-process existing output).
    if (
        not job.pipeline_applied == ProcessorPipeline.TXIMPORT.value
        and original_file
        and not original_file.needs_processing(job_context["job_id"])
    ):
        failure_reason = (
            "Sample has a good computed file, it must have been processed, "
            "so it doesn't need to be downloaded! Aborting!"
        )
        logger.error(failure_reason, job_id=job.id, original_file=original_file)
        job_context["original_files"] = []
        job_context["computed_files"] = []
        job_context["abort"] = True
        # Will be saved by end_job.
        job_context["job"].failure_reason = failure_reason
        return job_context

    # Set up the SIGTERM handler so we can appropriately handle being interrupted.
    # (`docker stop` uses SIGTERM, not SIGINT.)
    # (however, Nomad sends an SIGINT so catch both.)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # This job should not have been started, for some reason Nomad restarts some of our jobs
    # https://github.com/AlexsLemonade/refinebio/issues/1487
    if job.start_time is not None and settings.RUNNING_IN_CLOUD:
        # Let's just log the event and let the job run instead of failing
        # and also reset the endtime and failure reason, since those fields might have been set
        logger.warn(
            "ProcessorJob was restarted by Nomad. We do not know why this happened",
            processor_job=job.id,
            success=job.success,
            failure_reason=job.failure_reason,
            start_time=job.start_time,
            end_time=job.end_time,
        )
        job.end_time = None
        job.failure_reason = None

    job.worker_id = get_instance_id()
    job.worker_version = SYSTEM_VERSION
    job.start_time = timezone.now()

    # Some jobs (e.g. compendia) are dispatched straight to Nomad rather than
    # through the foreman, so their nomad_job_id was never recorded in the
    # database. Nomad exposes the running job's ID to the task through the
    # NOMAD_JOB_ID environment variable, so backfill it here if it's missing.
    if not job.nomad_job_id:
        job.nomad_job_id = get_env_variable_gracefully("NOMAD_JOB_ID", None)

    job.save()

    global CURRENT_JOB
    CURRENT_JOB = job

    logger.debug("Starting processor Job.", processor_job=job.id, pipeline=job.pipeline_applied)

    # Janitor jobs don't operate on file objects.
    # Tximport jobs don't need to download the original file, they
    # just need it to know what experiment to process.
    if job.pipeline_applied not in [
        ProcessorPipeline.JANITOR.value,
        ProcessorPipeline.TXIMPORT.value,
    ]:
        # Some jobs take OriginalFiles, other take Datasets
        if ProcessorPipeline[job.pipeline_applied] not in SMASHER_JOB_TYPES:
            job_context = prepare_original_files(job_context)
            if not job_context.get("success", True):
                return job_context
        else:
            job_context = prepare_dataset(job_context)
            if not job_context.get("success", True):
                return job_context
    else:
        # Just in case
        job_context["original_files"] = []
        job_context["computed_files"] = []

    return job_context
def end_job(job_context: Dict, abort=False):
    """A processor function to end jobs.

    Uploads any computed files to S3, records in the database that this
    job has completed, and — on success and when not aborted — marks
    the processed samples and updates their experiments.
    """
    job = job_context["job"]
    success = job_context.get("success", True)

    # Upload first so if this fails we can set success = False and let
    # the rest of the function mark it as failed.
    if success:
        # QN reference files go to a special bucket so they can be
        # publicly available.
        if job_context["job"].pipeline_applied == "QN_REFERENCE":
            s3_bucket = S3_QN_TARGET_BUCKET_NAME
        else:
            s3_bucket = S3_BUCKET_NAME

        # S3-sync Computed Files
        for computed_file in job_context.get("computed_files", []):
            # Ensure even distribution across S3 servers
            nonce = "".join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(24)
            )
            result = computed_file.sync_to_s3(s3_bucket, nonce + "_" + computed_file.filename)
            if result and settings.RUNNING_IN_CLOUD:
                computed_file.delete_local_file()
            elif not result:
                # One failed upload fails the whole job.
                success = False
                job_context["success"] = False
                job.failure_reason = "Failed to upload computed file."
                break

    if not success:
        # Remove both the local copies and the DB records of the computed
        # files, since the failed job's outputs must not be served.
        for computed_file in job_context.get("computed_files", []):
            computed_file.delete_local_file()
            if computed_file.id:
                computed_file.delete()

        # if the processor job fails mark all datasets as failed
        if ProcessorPipeline[job.pipeline_applied] in SMASHER_JOB_TYPES:
            for dataset in job.datasets.all():
                dataset.failure_reason = job.failure_reason
                dataset.is_processing = False
                dataset.save()

    if not abort:
        # Sample bookkeeping only applies to per-sample pipelines, not to
        # the dataset-level ones listed below.
        if job_context.get("success", False) and not (
            job_context["job"].pipeline_applied
            in [
                ProcessorPipeline.SMASHER.value,
                ProcessorPipeline.QN_REFERENCE.value,
                ProcessorPipeline.CREATE_COMPENDIA.value,
                ProcessorPipeline.CREATE_QUANTPENDIA.value,
                ProcessorPipeline.JANITOR.value,
            ]
        ):
            # Salmon requires the final `tximport` step to be fully `is_processed`.
            mark_as_processed = True
            if job_context["job"].pipeline_applied == "SALMON" and not job_context.get(
                "tximported", False
            ):
                mark_as_processed = False

            if mark_as_processed:
                # This handles most of our cases
                unique_experiments = []
                for sample in job_context.get("samples", []):
                    sample.is_processed = True
                    sample.save()
                    if sample.experiments.all().count() > 0:
                        unique_experiments = list(
                            set(unique_experiments + sample.experiments.all()[::1])
                        )

                # Explicitly for the single-salmon scenario
                if "sample" in job_context:
                    sample = job_context["sample"]
                    sample.is_processed = True
                    sample.save()

                for experiment in unique_experiments:
                    experiment.update_num_samples()

    # If we are aborting, it's because we want to do something
    # different, so leave the original files so that "something
    # different" can use them.
    if (success or job.no_retry) and not abort:
        # Cleanup Original Files
        if "original_files" in job_context:
            for original_file in job_context["original_files"]:
                original_file.delete_local_file()

    # If the pipeline includes any steps, save it.
    if "pipeline" in job_context:
        pipeline = job_context["pipeline"]
        if len(pipeline.steps):
            pipeline.save()

    # NOTE(review): compendia jobs deliberately keep their work_dir around
    # (everything else is removed when running in the cloud) — confirm why.
    if (
        "work_dir" in job_context
        and job_context["job"].pipeline_applied != ProcessorPipeline.CREATE_COMPENDIA.value
        and settings.RUNNING_IN_CLOUD
    ):
        shutil.rmtree(job_context["work_dir"], ignore_errors=True)

    job.abort = abort
    job.success = success
    job.end_time = timezone.now()
    job.save()

    if success:
        logger.debug(
            "Processor job completed successfully.",
            processor_job=job.id,
            pipeline_applied=job.pipeline_applied,
        )
    else:
        if not job.failure_reason:
            logger.error(
                "Processor job failed without having failure_reason set. FIX ME!!!!!!!!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
            )
        else:
            logger.error(
                "Processor job failed!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
                failure_reason=job.failure_reason,
            )

    # Return Final Job context so testers can check it
    return job_context
def run_pipeline(start_value: Dict, pipeline: List[Callable]):
    """Runs a pipeline of processor functions.

    start_value must contain a key 'job_id' which is a valid id for a
    ProcessorJob record.

    Each processor function must accept a dictionary and return a
    dictionary.

    Any processor function which returns a dictionary containing a key
    of 'success' with a value of False will cause the pipeline to
    terminate with a call to utils.end_job.

    The key 'job' is reserved for the ProcessorJob currently being
    run. It is required that the dictionary returned by each
    processor function preserve the mapping for 'job' that was passed
    into it.
    """
    job_id = start_value["job_id"]
    try:
        job = ProcessorJob.objects.get(id=job_id)
    except ProcessorJob.DoesNotExist:
        logger.error("Cannot find processor job record.", processor_job=job_id)
        return

    if len(pipeline) == 0:
        logger.error("Empty pipeline specified.", procesor_job=job_id)

    last_result = start_value
    last_result["job"] = job
    for processor in pipeline:
        try:
            last_result = processor(last_result)
        except ProcessorJobError as e:
            # The error carries job state (success/no_retry/abort/...)
            # which update_job persists before the job is ended.
            e.update_job(job)
            logger.exception(e.failure_reason, processor_job=job.id, **e.context)
            if e.success is False:
                # end_job will use this and set the value
                last_result["success"] = False
            return end_job(last_result, abort=bool(e.abort))
        except Exception as e:
            # Any other unhandled exception fails the job with a
            # descriptive failure_reason naming the processor function.
            failure_reason = (
                "Unhandled exception caught while running processor" " function {} in pipeline: "
            ).format(processor.__name__)
            logger.exception(failure_reason, no_retry=job.no_retry, processor_job=job_id)
            last_result["success"] = False
            last_result["job"].failure_reason = failure_reason + str(e)
            return end_job(last_result)

        if "success" in last_result and last_result["success"] is False:
            logger.error(
                "Processor function %s failed. Terminating pipeline.",
                processor.__name__,
                processor_job=job_id,
                failure_reason=last_result["job"].failure_reason,
            )
            return end_job(last_result)

        if last_result.get("abort", False):
            return end_job(last_result, abort=True)

    return last_result
class ProcessorJobError(Exception):
    """General processor job error class.

    Carries the failure reason plus optional job-state flags
    (success/no_retry/retried/abort) and arbitrary logging context.
    """

    def __init__(
        self, failure_reason, *, success=None, no_retry=None, retried=None, abort=None, **context
    ):
        super().__init__(failure_reason)
        self.failure_reason = failure_reason
        self.success = success
        self.no_retry = no_retry
        self.retried = retried
        self.abort = abort
        # additional context to be included when logging
        self.context = context

    def update_job(self, job):
        """Persist this error's state onto the given ProcessorJob (and any
        attached datasets)."""
        job.failure_reason = self.failure_reason

        # Only overwrite job flags that were explicitly provided.
        for field in ("success", "no_retry", "retried", "abort"):
            value = getattr(self, field)
            if value is not None:
                setattr(job, field, value)

        job.save()

        # also update the failure reason if this is a dataset's processor job
        for dataset in job.datasets.all():
            dataset.failure_reason = self.failure_reason
            dataset.success = False
            dataset.save()
def get_os_distro():
    """Return the OS distribution string read from "/etc/issue".

    Since we run inside Docker only Linux needs to be handled; on other
    platforms (e.g. macOS, where "sw_vers" would be needed) "/etc/issue"
    does not even exist. A more cross-platform approach would be Python's
    "platform" module.
    """
    with open("/etc/issue") as distro_file:
        first_line = distro_file.readline()
    return first_line.strip("\l\n\\n ")
def get_os_pkgs(pkg_list):
    """Map each OS-level package name in pkg_list to its installed version.

    Assumes a Debian-based package manager (dpkg/apt); raises if any
    requested package is not installed.
    """
    versions = dict()
    for package_name in pkg_list:
        completed = subprocess.run(
            ["dpkg-query", "--show", package_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if completed.returncode:
            raise Exception(
                "OS-level package %s not found: %s"
                % (package_name, completed.stderr.decode().strip())
            )

        # dpkg-query --show prints "<name>\t<version>".
        versions[package_name] = completed.stdout.decode().strip().split("\t")[-1]
    return versions
def get_cmd_lines(cmd_list):
    """Map each command string in cmd_list to its stripped output.

    Raises if any command exits with a non-zero status.
    """
    outputs = dict()
    for cmd in cmd_list:
        completed = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if completed.returncode:
            raise Exception(
                "Failed to run command line '%s': %s" % (cmd, completed.stderr.decode().strip())
            )

        # Workaround for "salmontools --version", which writes its output
        # to stderr instead of stdout. (We could pass
        # stderr=subprocess.STDOUT instead, but keeping the two streams
        # separate is probably the better idea.)
        reads_stderr = cmd.strip().split()[0] == "salmontools"
        raw_output = completed.stderr if reads_stderr else completed.stdout

        outputs[cmd] = raw_output.decode().strip()
    return outputs
def get_pip_pkgs(pkg_list):
    """Map each pip-installed package name in pkg_list to its version.

    Instead of running `pip show pkg` per package, the output of a single
    `pip freeze` is parsed and each requested package is looked up in it,
    launching only one subprocess.

    Raises if `pip freeze` fails or any requested package is absent.
    """
    process_done = subprocess.run(["pip", "freeze"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process_done.returncode:
        raise Exception("'pip freeze' failed: %s" % process_done.stderr.decode().strip())

    frozen_pkgs = dict()
    for line in process_done.stdout.decode().splitlines():
        line = line.strip()
        # `pip freeze` also emits non-pinned entries such as
        # "pkg @ file:///path" or "-e git+...". Those crashed the old
        # `name, version = item.split("==")` unpacking with a ValueError;
        # skip them, since they carry no plain version to report.
        if "==" not in line:
            continue
        name, _, version = line.partition("==")
        frozen_pkgs[name] = version

    pkg_info = dict()
    for pkg in pkg_list:
        try:
            version = frozen_pkgs[pkg]
        except KeyError:
            raise Exception("Pip package not found: %s" % pkg)
        pkg_info[pkg] = version
    return pkg_info
def get_bioc_version():
    """Return the version string of R's "Bioconductor".

    The data frame returned by installed.packages() does NOT include a
    package literally named "Bioconductor", so a dedicated R command is
    used to look its version up.
    """
    r_command = "tools:::.BioC_version_associated_with_R_version()"
    completed = subprocess.run(
        ["Rscript", "-e", r_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if completed.returncode:
        raise Exception(
            "R command failed to retrieve Bioconductor version: %s"
            % completed.stderr.decode().strip()
        )

    raw_version = completed.stdout.decode().strip().split()[-1]
    # Remove the leading and trailing non-ascii characters.
    version = raw_version[1:-1]

    if not version:
        raise Exception("Bioconductor not found")
    return version
def get_r_pkgs(pkg_list):
    """Returns a dictionary in which each key is the name of a R package
    and the corresponding value is the package's version.
    """
    # Use "Rscript -e <R_commands>" command to get all user-installed R packages.
    r_commands = "packages.df <- as.data.frame(installed.packages()[, c(1, 3:4)]); \
    packages.df <- packages.df[is.na(packages.df$Priority), 1:2, drop=FALSE]; \
    colnames(packages.df) <- NULL; \
    print(packages.df, row.names=FALSE);"

    process_done = subprocess.run(
        ["Rscript", "-e", r_commands], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if process_done.returncode:
        raise Exception(
            "R command failed to retrieves installed packages: %s"
            % process_done.stderr.decode().strip()
        )

    # Each printed line is "<name> <version>"; build a lookup table.
    r_pkgs = dict()
    for item in process_done.stdout.decode().strip().split("\n"):
        name, version = item.strip().split()
        r_pkgs[name] = version

    # "Brainarray" is a collection that consists of 121 ".*ensgprobe" packages.
    # They share the same version number, so we use 'hgu133plus2hsensgprobe'
    # package to report this uniform version.
    ba_proxy_pkg = "hgu133plus2hsensgprobe"

    pkg_info = dict()
    for pkg in pkg_list:
        if pkg == "Bioconductor":
            # Bioconductor is not listed by installed.packages(), so it
            # needs its own R invocation.
            version = get_bioc_version()
        else:
            try:
                version = r_pkgs[pkg] if pkg != "Brainarray" else r_pkgs[ba_proxy_pkg]
            except KeyError:
                raise Exception("R package not found: %s" % pkg)
        pkg_info[pkg] = version

    return pkg_info
def get_checksums(filenames_list):
    """Map each filename (resolved relative to this module's directory) to
    its md5 checksum, computed with the "md5sum" command line tool.
    """
    checksums = dict()
    for filename in filenames_list:
        absolute_path = os.path.join(DIRNAME, filename)
        completed = subprocess.run(
            ["md5sum", absolute_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if completed.returncode:
            raise Exception("md5sum command error:", completed.stderr.decode().strip())

        # md5sum prints "<digest>  <path>"; keep just the digest.
        checksums[filename] = completed.stdout.decode().strip().split()[0]
    return checksums
def get_runtime_env(yml_filename):
    """Read the given YAML file and return a dictionary mapping each
    runtime-environment category name to the version information of the
    packages listed in that category.

    Raises for unknown categories or when any version lookup fails.
    """
    runtime_env = dict()
    with open(yml_filename) as yml_fh:
        # safe_load keeps the parser from constructing arbitrary Python
        # objects from YAML tags (yaml.load without an explicit Loader is
        # unsafe and deprecated since PyYAML 5.1).
        pkgs = yaml.safe_load(yml_fh)
        for pkg_type, pkg_list in pkgs.items():
            if pkg_type == "os_distribution":
                value = get_os_distro()
            elif pkg_type == "os_pkg":
                value = get_os_pkgs(pkg_list)
            elif pkg_type == "cmd_line":
                value = get_cmd_lines(pkg_list)
            elif pkg_type == "python":
                value = get_pip_pkgs(pkg_list)
            elif pkg_type == "R":
                value = get_r_pkgs(pkg_list)
            elif pkg_type == "checksum":
                value = get_checksums(pkg_list)
            else:
                raise Exception("Unknown category in %s: %s" % (yml_filename, pkg_type))
            runtime_env[pkg_type] = value

    return runtime_env
def find_processor(enum_key):
    """Return the Processor record matching the current processor name,
    version and runtime environment, creating it if none exists yet.
    """
    processor_info = ProcessorEnum[enum_key].value

    # In current implementation, ALWAYS get the runtime environment.
    yml_path = os.path.join(DIRNAME, processor_info["yml_file"])
    environment = get_runtime_env(yml_path)

    processor, _created = Processor.objects.get_or_create(
        name=processor_info["name"],
        version=SYSTEM_VERSION,
        docker_image=processor_info["docker_img"],
        environment=environment,
    )
    return processor
def handle_processor_exception(job_context, processor_key, ex):
    """Mark the job context as failed because a Processor record could not
    be set, recording the failure reason on the job."""
    failure_message = "Failed to set processor: %s" % ex
    job_context["success"] = False
    job_context["job"].failure_reason = failure_message
    logger.error(failure_message, job_id=job_context["job"].id, processor=processor_key)
    return job_context
def cache_keys(*keys, work_dir_key="work_dir"):
    """Decorator for a pipeline function that caches the given job_context
    keys in the job's work directory.

    On the first call the wrapped function runs normally and the values of
    `keys` from its result are pickled into the work dir. On subsequent
    calls with the same work dir the pickled values are loaded and merged
    into job_context instead of executing the function again. Cache
    load/save failures are logged but never fail the job.
    """

    def inner(func):
        # generate a unique name for the cache based on the pipeline name
        # and the cached keys
        cache_name = "__".join(list(keys) + [func.__name__])

        def pipeline(job_context):
            cache_path = os.path.join(job_context[work_dir_key], cache_name)

            if os.path.exists(cache_path):
                # cached values exist, load keys from cache
                try:
                    # Use a context manager so the handle is closed
                    # (the old pickle.load(open(...)) leaked it).
                    with open(cache_path, "rb") as cache_file:
                        values = pickle.load(cache_file)
                    return {**job_context, **values}
                except Exception:
                    # Don't fail if we can't load the cache; fall through
                    # and run the function normally. (Was a bare `except:`,
                    # which also swallowed SystemExit/KeyboardInterrupt.)
                    logger.warning(
                        "Failed to load cached data for pipeline function.",
                        function_name=func.__name__,
                        keys=keys,
                    )

            # execute the actual function
            job_context = func(job_context)

            try:
                # save cached data for the next run
                values = {key: job_context[key] for key in keys}
                with open(cache_path, "wb") as cache_file:
                    pickle.dump(values, cache_file)
            except Exception:
                # don't fail if we can't save the cache
                logger.warning(
                    "Failed to cache data for pipeline function.",
                    function_name=func.__name__,
                    keys=keys,
                )

            return job_context

        return pipeline

    return inner
<|code_end|>
|
Too many emails when dataset fails to smash
### Context
I tried to download a dataset from refine.bio which failed. (Nov 19, 20)
### Problem or idea
I received 5 emails that day and 4 of them in a span of 20 minutes, all with the same failure reason.
For another dataset that I tried to download, I got 3 emails in a span of 2 minutes.
I don't know if this is the normal behavior or there was something else going on that day which might have triggered the additional emails.
### Solution or next step
Users should receive one failure email.
| workers/data_refinery_workers/processors/smasher.py
<|code_start|># -*- coding: utf-8 -*-
import boto3
import csv
import os
import rpy2
import rpy2.robjects as ro
import shutil
import simplejson as json
import string
import warnings
import requests
import psutil
import logging
import time
from botocore.exceptions import ClientError
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from pathlib import Path
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
from sklearn import preprocessing
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
OriginalFile,
Pipeline,
SampleResultAssociation,
Dataset
)
from data_refinery_common.utils import get_env_variable, calculate_file_size, calculate_sha1
from data_refinery_workers.processors import utils, smashing_utils
from urllib.parse import quote
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
BODY_HTML = Path('data_refinery_workers/processors/smasher_email.min.html').read_text().replace('\n', '')
BODY_ERROR_HTML = Path('data_refinery_workers/processors/smasher_email_error.min.html').read_text().replace('\n', '')
BYTES_IN_GB = 1024 * 1024 * 1024
logger = get_and_configure_logger(__name__)
### DEBUG ###
logger.setLevel(logging.getLevelName('DEBUG'))
PROCESS_POOL_SIZE = max(1, int(psutil.cpu_count()/2 - 1))
SCALERS = {
'MINMAX': preprocessing.MinMaxScaler,
'STANDARD': preprocessing.StandardScaler,
'ROBUST': preprocessing.RobustScaler,
}
def log_state(message, job_id, start_time=False):
    """Log a debug message along with current CPU and process-RAM usage.

    When `start_time` is falsy, returns time.time() so the caller can pass
    it back later to get an elapsed-duration log line instead.
    NOTE(review): when DEBUG logging is disabled this returns None; callers
    appear to tolerate a falsy return — confirm.
    """
    if logger.isEnabledFor(logging.DEBUG):
        process = psutil.Process(os.getpid())
        ram_in_GB = process.memory_info().rss / BYTES_IN_GB
        logger.debug(message,
                     total_cpu=psutil.cpu_percent(),
                     process_ram=ram_in_GB,
                     job_id=job_id)

        if start_time:
            logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
        else:
            return time.time()
def _inner_join(job_context: Dict) -> pd.DataFrame:
    """Performs an inner join across the all_frames key of job_context.

    Returns a dataframe, not the job_context.

    Frames with a column already present in the merge are skipped, and
    frames whose join would drop every row are rolled back and recorded in
    job_context['unsmashable_files'].

    TODO: This function should be mostly unnecessary now because we
    pretty much do this in the smashing utils but I don't want to rip
    it out right now .
    """
    # Merge all of the frames we've gathered into a single big frame, skipping duplicates.
    # TODO: If the very first frame is the wrong platform, are we boned?
    merged = job_context['all_frames'][0]
    i = 1

    old_len_merged = len(merged)
    # Kept so a merge that empties the frame can be undone.
    merged_backup = merged

    while i < len(job_context['all_frames']):
        frame = job_context['all_frames'][i]
        i = i + 1

        if i % 1000 == 0:
            logger.info("Smashing keyframe",
                        i=i,
                        job_id=job_context['job'].id)

        # I'm not sure where these are sneaking in from, but we don't want them.
        # Related: https://github.com/AlexsLemonade/refinebio/issues/390
        breaker = False
        for column in frame.columns:
            if column in merged.columns:
                breaker = True

        if breaker:
            # NOTE(review): `column` here is the frame's *last* column,
            # which is not necessarily the duplicated one — confirm intent.
            logger.warning("Column repeated for smash job!",
                           dataset_id=job_context["dataset"].id,
                           job_id=job_context["job"].id,
                           column=column)
            continue

        # This is the inner join, the main "Smash" operation
        merged = merged.merge(frame, how='inner', left_index=True, right_index=True)

        new_len_merged = len(merged)
        if new_len_merged < old_len_merged:
            logger.warning("Dropped rows while smashing!",
                           dataset_id=job_context["dataset"].id,
                           old_len_merged=old_len_merged,
                           new_len_merged=new_len_merged
                           )
        if new_len_merged == 0:
            # The join produced nothing; undo it and mark the frame unsmashable.
            logger.warning("Skipping a bad merge frame!",
                           dataset_id=job_context["dataset"].id,
                           job_id=job_context["job"].id,
                           old_len_merged=old_len_merged,
                           new_len_merged=new_len_merged,
                           bad_frame_number=i,)
            merged = merged_backup
            new_len_merged = len(merged)
            try:
                job_context['unsmashable_files'].append(frame.columns[0])
            except Exception:
                # Something is really, really wrong with this frame.
                pass

        old_len_merged = len(merged)
        merged_backup = merged

    return merged
def process_frames_for_key(key: str,
                           input_files: List[ComputedFile],
                           job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in `input_files`.

    Will add to job_context the key 'all_frames', a list of pandas
    dataframes containing all the samples' data. Also adds the key
    'unsmashable_files' containing a list of paths that were
    determined to be unsmashable.
    """
    job_context['original_merged'] = pd.DataFrame()

    start_all_frames = log_state("Building list of all_frames key {}".format(key),
                                 job_context["job"].id)

    job_context['all_frames'] = []
    for (computed_file, sample) in input_files:
        frame_data = smashing_utils.process_frame(job_context["work_dir"],
                                                  computed_file,
                                                  sample.accession_code,
                                                  job_context['dataset'].aggregate_by)
        if frame_data is not None:
            job_context['all_frames'].append(frame_data)
        else:
            # The file could not be turned into a frame; record it so the
            # user can be told it was left out.
            logger.warning('Unable to smash file',
                           computed_file=computed_file.id,
                           dataset_id=job_context['dataset'].id,
                           job_id=job_context["job"].id)
            job_context['unsmashable_files'].append(computed_file.filename)

    log_state("Finished building list of all_frames key {}".format(key),
              job_context["job"].id,
              start_all_frames)

    return job_context
def _smash_key(job_context: Dict, key: str, input_files: List[ComputedFile]) -> Dict:
    """Smash all of the input files together for a given key.

    Steps:
        Combine common genes (pandas merge)
        Transpose such that genes are columns (features)
        Scale features with sci-kit learn
        Transpose again such that samples are columns and genes are rows
    """
    start_smash = log_state("start _smash_key for {}".format(key), job_context["job"].id)
    # Check if we need to copy the quant.sf files
    if job_context['dataset'].quant_sf_only:
        outfile_dir = job_context["output_dir"] + key + "/"
        os.makedirs(outfile_dir, exist_ok=True)
        samples = [sample for (_, sample) in input_files]
        job_context['num_samples'] += smashing_utils.sync_quant_files(outfile_dir, samples)
        # we ONLY want to give quant sf files to the user if that's what they requested
        return job_context

    job_context = process_frames_for_key(key, input_files, job_context)

    if len(job_context['all_frames']) < 1:
        logger.error("Was told to smash a key with no frames!",
                     job_id=job_context['job'].id,
                     key=key)
        # TODO: is this the proper way to handle this? I can see us
        # not wanting to fail an entire dataset because one experiment
        # had a problem, but I also think it could be problematic to
        # just skip an experiment and pretend nothing went wrong.
        return job_context

    merged = _inner_join(job_context)

    job_context['original_merged'] = merged
    log_state("end build all frames", job_context["job"].id, start_smash)
    start_qn = log_state("start qn", job_context["job"].id, start_smash)

    # Quantile Normalization
    if job_context['dataset'].quantile_normalize:
        try:
            job_context['merged_no_qn'] = merged
            job_context['organism'] = job_context['dataset'].get_samples().first().organism
            job_context = smashing_utils.quantile_normalize(job_context)
            merged = job_context.get('merged_qn', None)

            # We probably don't have an QN target or there is another error,
            # so let's fail gracefully.
            assert merged is not None, "Problem occured during quantile normalization: No merged_qn"
        except Exception as e:
            logger.exception("Problem occured during quantile normalization",
                             dataset_id=job_context['dataset'].id,
                             processor_job_id=job_context["job"].id,
                             )
            job_context['dataset'].success = False

            # Keep the first failure reason if one was already recorded.
            if not job_context['job'].failure_reason:
                job_context['job'].failure_reason = "Failure reason: " + str(e)

            job_context['dataset'].failure_reason = "Failure reason: " + str(e)
            job_context['dataset'].save()
            # Delay failing this pipeline until the failure notify has been sent
            job_context['job'].success = False
            job_context['failure_reason'] = str(e)
            return job_context
    # End QN
    log_state("end qn", job_context["job"].id, start_qn)

    # Transpose before scaling
    # Do this even if we don't want to scale in case transpose
    # modifies the data in any way. (Which it shouldn't but
    # we're paranoid.)
    # TODO: stop the paranoia because Josh has alleviated it.
    transposed = merged.transpose()
    start_scaler = log_state("starting scaler", job_context["job"].id)

    # Scaler
    if job_context['dataset'].scale_by != "NONE":
        scale_funtion = SCALERS[job_context['dataset'].scale_by]
        scaler = scale_funtion(copy=True)
        scaler.fit(transposed)
        scaled = pd.DataFrame(scaler.transform(transposed),
                              index=transposed.index,
                              columns=transposed.columns)
        # Untranspose
        untransposed = scaled.transpose()
    else:
        # Wheeeeeeeeeee
        untransposed = transposed.transpose()
    log_state("end scaler", job_context["job"].id, start_scaler)

    # This is just for quality assurance in tests.
    job_context['final_frame'] = untransposed

    # Write to temp file with dataset UUID in filename.
    # NOTE(review): `subdir` is computed here but never used below — confirm
    # whether it is dead code.
    subdir = ''
    if job_context['dataset'].aggregate_by in ["SPECIES", "EXPERIMENT"]:
        subdir = key
    elif job_context['dataset'].aggregate_by == "ALL":
        subdir = "ALL"

    # Normalize the Header format
    untransposed.index.rename('Gene', inplace=True)

    outfile_dir = job_context["output_dir"] + key + "/"
    os.makedirs(outfile_dir, exist_ok=True)
    outfile = outfile_dir + key + ".tsv"
    job_context['smash_outfile'] = outfile
    untransposed.to_csv(outfile, sep='\t', encoding='utf-8')

    log_state("end _smash_key for {}".format(key), job_context["job"].id, start_smash)
    return job_context
def _smash_all(job_context: Dict) -> Dict:
    """Perform smashing on all species/experiments in the dataset.

    Iterates over ``job_context['input_files']`` (a mapping of species
    name or experiment accession -> list of files), smashes each group
    with `_smash_key`, writes the accompanying metadata files, and zips
    the whole output directory into ``job_context['output_file']``.

    On any exception the dataset and job are marked failed but the
    context is still returned so the failure notification can be sent
    later in the pipeline.
    """
    start_smash = log_state("start smash", job_context["job"].id)
    # We have already failed - return now so we can send our fail email.
    if job_context['job'].success is False:
        return job_context
    try:
        job_context['unsmashable_files'] = []
        job_context['num_samples'] = 0
        # Smash all of the sample sets
        logger.debug("About to smash!",
                     dataset_count=len(job_context['dataset'].data),
                     job_id=job_context['job'].id)
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop('input_files').items():
            job_context = _smash_key(job_context, key, input_files)
        smashing_utils.write_non_data_files(job_context)
        # Finally, compress all files into a zip
        final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
        shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
        job_context["output_file"] = final_zip_base + ".zip"
    except Exception as e:
        logger.exception("Could not smash dataset.",
                         dataset_id=job_context['dataset'].id,
                         processor_job_id=job_context['job_id'],
                         num_input_files=job_context['num_input_files'])
        job_context['dataset'].success = False
        job_context['job'].failure_reason = "Failure reason: " + str(e)
        job_context['dataset'].failure_reason = "Failure reason: " + str(e)
        job_context['dataset'].save()
        # Delay failing this pipeline until the failure notify has been sent
        job_context['job'].success = False
        job_context['failure_reason'] = str(e)
        return job_context
    job_context['dataset'].success = True
    job_context['dataset'].save()
    logger.debug("Created smash output!",
                 archive_location=job_context["output_file"])
    log_state("end smash", job_context["job"].id, start_smash);
    return job_context
def _upload(job_context: Dict) -> Dict:
    """Uploads the result file to S3 and records its location on the dataset.

    Only uploads when ``job_context['upload']`` is truthy and we are
    running in the cloud; otherwise this is a no-op. On success the
    dataset's S3 bucket/key, size and sha1 are saved and the local
    archive is removed. Upload failures mark the job failed but the
    context is still returned so the failure notification can go out.
    """
    # There has been a failure already, don't try to upload anything.
    if not job_context.get("output_file", None):
        logger.error("Was told to upload a smash result without an output_file.",
                     job_id=job_context['job'].id)
        return job_context
    try:
        if job_context.get("upload", True) and settings.RUNNING_IN_CLOUD:
            s3_client = boto3.client('s3')
            # Note that file expiry is handled by the S3 object lifecycle,
            # managed by terraform.
            s3_client.upload_file(
                job_context["output_file"],
                RESULTS_BUCKET,
                job_context["output_file"].split('/')[-1],
                ExtraArgs={'ACL':'public-read'}
            )
            result_url = ("https://s3.amazonaws.com/" + RESULTS_BUCKET + "/" +
                          job_context["output_file"].split('/')[-1])
            job_context["result_url"] = result_url
            logger.debug("Result uploaded!",
                         result_url=job_context["result_url"]
                         )
            job_context["dataset"].s3_bucket = RESULTS_BUCKET
            job_context["dataset"].s3_key = job_context["output_file"].split('/')[-1]
            job_context["dataset"].size_in_bytes = calculate_file_size(job_context["output_file"])
            job_context["dataset"].sha1 = calculate_sha1(job_context["output_file"])
            job_context["dataset"].save()
            # File is uploaded, we can delete the local.
            try:
                os.remove(job_context["output_file"])
            except OSError:
                pass
    except Exception as e:
        logger.exception("Failed to upload smash result file.", file=job_context["output_file"])
        job_context['job'].success = False
        job_context['job'].failure_reason = "Failure reason: " + str(e)
        # Delay failing this pipeline until the failure notify has been sent
        # job_context['success'] = False
    return job_context
def _notify(job_context: Dict) -> Dict:
    """Use AWS SES to notify a user of a smash result.

    Skipped entirely when uploads are disabled or when not running in
    the cloud. On job failure a Slack notification is posted (best
    effort); if the dataset has an email address an email is sent and
    SES errors are escalated as ProcessorJobError.
    """
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context
    # Send a notification to slack when a dataset fails to be processed
    if job_context['job'].success is False:
        try:
            _notify_slack_failed_dataset(job_context)
        except Exception as e:
            logger.warn(e) # It doesn't really matter if this didn't work
    # Don't send an email if we don't have address.
    if job_context["dataset"].email_address:
        # Try to send the email.
        try:
            _notify_send_email(job_context)
        # Display an error if something goes wrong.
        except ClientError as e:
            raise utils.ProcessorJobError('ClientError while notifying',
                                          success=False,
                                          exc_info=1,
                                          client_error_message=e.response['Error']['Message'])
        except Exception as e:
            # NOTE(review): 'result_url' is only set by a successful _upload,
            # so this raise could itself KeyError on a failed run — verify.
            raise utils.ProcessorJobError('General failure when trying to send email.',
                                          success=False,
                                          exc_info=1,
                                          result_url=job_context['result_url'])
    return job_context
def _notify_slack_failed_dataset(job_context: Dict):
    """Send a slack notification when a dataset fails to smash.

    Posts directly to the configured webhook; any HTTP/timeout error
    propagates to the caller, which treats it as non-fatal.
    """
    # Link to the dataset page, where the user can re-try the download job
    dataset_url = 'https://www.refine.bio/dataset/' + str(job_context['dataset'].id)
    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json={
            'channel': 'ccdl-general', # Move to robots when we get sick of these
            'username': 'EngagementBot',
            'icon_emoji': ':halal:',
            'fallback': 'Dataset failed processing.',
            'title': 'Dataset failed processing',
            'title_link': dataset_url,
            'attachments':[
                {
                    'color': 'warning',
                    'text': job_context['job'].failure_reason,
                    'author_name': job_context['dataset'].email_address,
                    'fields': [
                        {
                            'title': 'Dataset id',
                            'value': str(job_context['dataset'].id)
                        }
                    ]
                }
            ]
        },
        headers={'Content-Type': 'application/json'},
        timeout=10
    )
def _notify_send_email(job_context):
    """Send email notification to the user if the dataset succeeded or failed.

    Builds either a success or a failure email (plain-text and HTML
    variants) and sends it via AWS SES. As a side effect, sets
    ``job_context['success'] = False`` on the failure path.
    """
    dataset_url = 'https://www.refine.bio/dataset/' + str(job_context['dataset'].id)
    SENDER = "Refine.bio Mail Robot <noreply@refine.bio>"
    RECIPIENT = job_context["dataset"].email_address
    AWS_REGION = "us-east-1"
    CHARSET = "UTF-8"
    if job_context['job'].success is False:
        SUBJECT = "There was a problem processing your refine.bio dataset :("
        BODY_TEXT = "We tried but were unable to process your requested dataset. Error was: \n\n" + str(job_context['job'].failure_reason) + "\nDataset ID: " + str(job_context['dataset'].id) + "\n We have been notified and are looking into the problem. \n\nSorry!"
        # Pre-filled title/body for the GitHub-issue and mailto links in the
        # error template; quote() makes them URL-safe.
        ERROR_EMAIL_TITLE = quote('I can\'t download my dataset')
        ERROR_EMAIL_BODY = quote("""
[What browser are you using?]
[Add details of the issue you are facing]
---
""" + str(job_context['dataset'].id))
        FORMATTED_HTML = BODY_ERROR_HTML.replace('REPLACE_DATASET_URL', dataset_url)\
            .replace('REPLACE_ERROR_TEXT', job_context['job'].failure_reason)\
            .replace('REPLACE_NEW_ISSUE', 'https://github.com/AlexsLemonade/refinebio/issues/new?title={0}&body={1}&labels=bug'.format(ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY))\
            .replace('REPLACE_MAILTO', 'mailto:ccdl@alexslemonade.org?subject={0}&body={1}'.format(ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY))
        job_context['success'] = False
    else:
        SUBJECT = "Your refine.bio Dataset is Ready!"
        BODY_TEXT = "Hot off the presses:\n\n" + dataset_url + "\n\nLove!,\nThe refine.bio Team"
        FORMATTED_HTML = BODY_HTML.replace('REPLACE_DOWNLOAD_URL', dataset_url)\
            .replace('REPLACE_DATASET_URL', dataset_url)
    # Create a new SES resource and specify a region.
    client = boto3.client('ses', region_name=AWS_REGION)
    #Provide the contents of the email.
    # NOTE(review): `response` is unused; SES errors surface as exceptions
    # handled by the caller (_notify).
    response = client.send_email(
        Destination={
            'ToAddresses': [
                RECIPIENT,
            ],
        },
        Message={
            'Body': {
                'Html': {
                    'Charset': CHARSET,
                    'Data': FORMATTED_HTML,
                },
                'Text': {
                    'Charset': CHARSET,
                    'Data': BODY_TEXT,
                },
            },
            'Subject': {
                'Charset': CHARSET,
                'Data': SUBJECT,
            }
        },
        Source=SENDER,
    )
def _update_result_objects(job_context: Dict) -> Dict:
    """Mark the dataset processed/available, set a one-week expiry, and flag success."""
    dataset = job_context["dataset"]
    for flag, value in (("is_processing", False),
                        ("is_processed", True),
                        ("is_available", True)):
        setattr(dataset, flag, value)
    dataset.expires_on = timezone.now() + timedelta(days=7)
    dataset.save()
    job_context["success"] = True
    return job_context
def smash(job_id: int, upload=True) -> None:
    """Main Smasher interface.

    Runs the smasher pipeline for the given processor job id: prepare
    input files, smash them, upload the archive, and close out the
    dataset. `_notify` is called outside of run_pipeline so users get
    an email whether processing failed or succeeded.
    """
    pipeline = Pipeline(name=PipelineEnum.SMASHER.value)
    job_context = utils.run_pipeline({"job_id": job_id,
                                      "upload": upload,
                                      "pipeline": pipeline},
                                     [utils.start_job,
                                      smashing_utils.prepare_files,
                                      _smash_all,
                                      _upload,
                                      _update_result_objects,
                                      utils.end_job])
    # ensure that `notify` is always called so that users get emails in case processing fails or succeeds
    job_context = _notify(job_context)
    return job_context
<|code_end|>
| workers/data_refinery_workers/processors/smasher.py
<|code_start|># -*- coding: utf-8 -*-
import boto3
import csv
import os
import rpy2
import rpy2.robjects as ro
import shutil
import simplejson as json
import string
import warnings
import requests
import psutil
import logging
import time
from botocore.exceptions import ClientError
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from pathlib import Path
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
from sklearn import preprocessing
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from data_refinery_common.job_lookup import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
OriginalFile,
Pipeline,
SampleResultAssociation,
Dataset
)
from data_refinery_common.utils import get_env_variable, calculate_file_size, calculate_sha1
from data_refinery_workers.processors import utils, smashing_utils
from urllib.parse import quote
# S3 buckets for final smashed archives and for intermediate data.
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
# Minified HTML templates for the notification emails, read once at import time.
BODY_HTML = Path('data_refinery_workers/processors/smasher_email.min.html').read_text().replace('\n', '')
BODY_ERROR_HTML = Path('data_refinery_workers/processors/smasher_email_error.min.html').read_text().replace('\n', '')
BYTES_IN_GB = 1024 * 1024 * 1024
logger = get_and_configure_logger(__name__)
### DEBUG ###
# Force DEBUG so log_state() emits its CPU/RAM diagnostics.
logger.setLevel(logging.getLevelName('DEBUG'))
# Roughly half the available cores, minus one, but always at least one.
PROCESS_POOL_SIZE = max(1, int(psutil.cpu_count()/2 - 1))
# scale_by values on a Dataset map to these sklearn scalers.
SCALERS = {
    'MINMAX': preprocessing.MinMaxScaler,
    'STANDARD': preprocessing.StandardScaler,
    'ROBUST': preprocessing.RobustScaler,
}
def log_state(message, job_id, start_time=False):
    """Log a debug message along with CPU/RAM usage for this process.

    When `start_time` (a previous return value of this function) is
    given, also logs the elapsed time since then.

    Always returns the current time so callers can chain measurements
    (``start = log_state(...); ...; log_state(..., start)``). The
    original returned None whenever debug logging was disabled or a
    start_time was supplied, which silently broke chained duration
    measurements such as ``start_qn = log_state("start qn", id, start_smash)``.
    """
    if logger.isEnabledFor(logging.DEBUG):
        process = psutil.Process(os.getpid())
        ram_in_GB = process.memory_info().rss / BYTES_IN_GB
        logger.debug(message,
                     total_cpu=psutil.cpu_percent(),
                     process_ram=ram_in_GB,
                     job_id=job_id)
        if start_time:
            logger.debug('Duration: %s' % (time.time() - start_time), job_id=job_id)
    return time.time()
def _inner_join(job_context: Dict) -> pd.DataFrame:
    """Performs an inner join across the all_frames key of job_context.

    Returns a dataframe, not the job_context.

    Frames whose merge would drop every row are skipped: the previous
    merge is restored and the frame is recorded as unsmashable.

    TODO: This function should be mostly unnecessary now because we
    pretty much do this in the smashing utils but I don't want to rip
    it out right now .
    """
    # Merge all of the frames we've gathered into a single big frame, skipping duplicates.
    # TODO: If the very first frame is the wrong platform, are we boned?
    merged = job_context['all_frames'][0]
    i = 1
    old_len_merged = len(merged)
    # Keep the last good merge so a frame that wipes out every row can be rolled back.
    merged_backup = merged
    while i < len(job_context['all_frames']):
        frame = job_context['all_frames'][i]
        i = i + 1
        if i % 1000 == 0:
            logger.info("Smashing keyframe",
                        i=i,
                        job_id=job_context['job'].id)
        # I'm not sure where these are sneaking in from, but we don't want them.
        # Related: https://github.com/AlexsLemonade/refinebio/issues/390
        breaker = False
        for column in frame.columns:
            if column in merged.columns:
                breaker = True
        if breaker:
            # NOTE(review): `column` here is the *last* column checked, not
            # necessarily the duplicated one — verify before relying on it.
            logger.warning("Column repeated for smash job!",
                           dataset_id=job_context["dataset"].id,
                           job_id=job_context["job"].id,
                           column=column)
            continue
        # This is the inner join, the main "Smash" operation
        merged = merged.merge(frame, how='inner', left_index=True, right_index=True)
        new_len_merged = len(merged)
        if new_len_merged < old_len_merged:
            logger.warning("Dropped rows while smashing!",
                           dataset_id=job_context["dataset"].id,
                           old_len_merged=old_len_merged,
                           new_len_merged=new_len_merged
                           )
        if new_len_merged == 0:
            logger.warning("Skipping a bad merge frame!",
                           dataset_id=job_context["dataset"].id,
                           job_id=job_context["job"].id,
                           old_len_merged=old_len_merged,
                           new_len_merged=new_len_merged,
                           bad_frame_number=i,)
            # Roll back to the last non-empty merge.
            merged = merged_backup
            new_len_merged = len(merged)
            try:
                job_context['unsmashable_files'].append(frame.columns[0])
            except Exception:
                # Something is really, really wrong with this frame.
                pass
        old_len_merged = len(merged)
        merged_backup = merged
    return merged
def process_frames_for_key(key: str,
                           input_files: List[ComputedFile],
                           job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in `input_files`.

    Will add to job_context the key 'all_frames', a list of pandas
    dataframes containing all the samples' data. Also adds the key
    'unsmashable_files' containing a list of paths that were
    determined to be unsmashable.
    """
    # NOTE(review): despite the docstring, this appends to the existing
    # job_context['unsmashable_files'] list (created in _smash_all)
    # rather than creating the key itself — confirm and fix the docstring.
    job_context['original_merged'] = pd.DataFrame()
    start_all_frames = log_state("Building list of all_frames key {}".format(key),
                                 job_context["job"].id)
    job_context['all_frames'] = []
    # input_files is a list of (ComputedFile, Sample) pairs.
    for (computed_file, sample) in input_files:
        frame_data = smashing_utils.process_frame(job_context["work_dir"],
                                                  computed_file,
                                                  sample.accession_code,
                                                  job_context['dataset'].aggregate_by)
        if frame_data is not None:
            job_context['all_frames'].append(frame_data)
        else:
            # Couldn't parse this file into a frame; record it as unsmashable.
            logger.warning('Unable to smash file',
                           computed_file=computed_file.id,
                           dataset_id=job_context['dataset'].id,
                           job_id=job_context["job"].id)
            job_context['unsmashable_files'].append(computed_file.filename)
    log_state("Finished building list of all_frames key {}".format(key),
              job_context["job"].id,
              start_all_frames)
    return job_context
def _smash_key(job_context: Dict, key: str, input_files: List[ComputedFile]) -> Dict:
    """Smash all of the input files together for a given key.

    Steps:
        Combine common genes (pandas merge)
        Transpose such that genes are columns (features)
        Scale features with sci-kit learn
        Transpose again such that samples are columns and genes are rows

    `key` is a species name or experiment accession; the result is
    written to ``<output_dir>/<key>/<key>.tsv``. For quant.sf-only
    datasets the quant files are copied instead and no smashing happens.
    """
    start_smash = log_state("start _smash_key for {}".format(key), job_context["job"].id)
    # Check if we need to copy the quant.sf files
    if job_context['dataset'].quant_sf_only:
        outfile_dir = job_context["output_dir"] + key + "/"
        os.makedirs(outfile_dir, exist_ok=True)
        samples = [sample for (_, sample) in input_files]
        job_context['num_samples'] += smashing_utils.sync_quant_files(outfile_dir, samples)
        # we ONLY want to give quant sf files to the user if that's what they requested
        return job_context
    job_context = process_frames_for_key(key, input_files, job_context)
    if len(job_context['all_frames']) < 1:
        logger.error("Was told to smash a key with no frames!",
                     job_id=job_context['job'].id,
                     key=key)
        # TODO: is this the proper way to handle this? I can see us
        # not wanting to fail an entire dataset because one experiment
        # had a problem, but I also think it could be problematic to
        # just skip an experiment and pretend nothing went wrong.
        return job_context
    merged = _inner_join(job_context)
    job_context['original_merged'] = merged
    log_state("end build all frames", job_context["job"].id, start_smash)
    start_qn = log_state("start qn", job_context["job"].id, start_smash)
    # Quantile Normalization
    if job_context['dataset'].quantile_normalize:
        try:
            job_context['merged_no_qn'] = merged
            job_context['organism'] = job_context['dataset'].get_samples().first().organism
            job_context = smashing_utils.quantile_normalize(job_context)
            merged = job_context.get('merged_qn', None)
            # We probably don't have an QN target or there is another error,
            # so let's fail gracefully.
            assert merged is not None, "Problem occured during quantile normalization: No merged_qn"
        except Exception as e:
            logger.exception("Problem occured during quantile normalization",
                             dataset_id=job_context['dataset'].id,
                             processor_job_id=job_context["job"].id,
                             )
            job_context['dataset'].success = False
            if not job_context['job'].failure_reason:
                job_context['job'].failure_reason = "Failure reason: " + str(e)
            job_context['dataset'].failure_reason = "Failure reason: " + str(e)
            job_context['dataset'].save()
            # Delay failing this pipeline until the failure notify has been sent
            job_context['job'].success = False
            job_context['failure_reason'] = str(e)
            return job_context
    # End QN
    log_state("end qn", job_context["job"].id, start_qn)
    # Transpose before scaling
    # Do this even if we don't want to scale in case transpose
    # modifies the data in any way. (Which it shouldn't but
    # we're paranoid.)
    # TODO: stop the paranoia because Josh has alleviated it.
    transposed = merged.transpose()
    start_scaler = log_state("starting scaler", job_context["job"].id)
    # Scaler
    if job_context['dataset'].scale_by != "NONE":
        scale_function = SCALERS[job_context['dataset'].scale_by]
        scaler = scale_function(copy=True)
        scaler.fit(transposed)
        scaled = pd.DataFrame(scaler.transform(transposed),
                              index=transposed.index,
                              columns=transposed.columns)
        # Untranspose
        untransposed = scaled.transpose()
    else:
        # Wheeeeeeeeeee
        untransposed = transposed.transpose()
    log_state("end scaler", job_context["job"].id, start_scaler)
    # This is just for quality assurance in tests.
    job_context['final_frame'] = untransposed
    # Write to temp file with dataset UUID in filename.
    # (A dead `subdir` computation that was never read has been removed.)
    # Normalize the Header format
    untransposed.index.rename('Gene', inplace=True)
    outfile_dir = job_context["output_dir"] + key + "/"
    os.makedirs(outfile_dir, exist_ok=True)
    outfile = outfile_dir + key + ".tsv"
    job_context['smash_outfile'] = outfile
    untransposed.to_csv(outfile, sep='\t', encoding='utf-8')
    log_state("end _smash_key for {}".format(key), job_context["job"].id, start_smash)
    return job_context
def _smash_all(job_context: Dict) -> Dict:
    """Perform smashing on all species/experiments in the dataset.

    Iterates over ``job_context['input_files']`` (a mapping of species
    name or experiment accession -> list of files), smashes each group
    with `_smash_key`, writes the accompanying metadata files, and zips
    the whole output directory into ``job_context['output_file']``.

    On any exception the dataset and job are marked failed but the
    context is still returned so the failure notification can be sent
    later in the pipeline.
    """
    start_smash = log_state("start smash", job_context["job"].id)
    # We have already failed - return now so we can send our fail email.
    if job_context['job'].success is False:
        return job_context
    try:
        job_context['unsmashable_files'] = []
        job_context['num_samples'] = 0
        # Smash all of the sample sets
        logger.debug("About to smash!",
                     dataset_count=len(job_context['dataset'].data),
                     job_id=job_context['job'].id)
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop('input_files').items():
            job_context = _smash_key(job_context, key, input_files)
        smashing_utils.write_non_data_files(job_context)
        # Finally, compress all files into a zip
        final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
        shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
        job_context["output_file"] = final_zip_base + ".zip"
    except Exception as e:
        logger.exception("Could not smash dataset.",
                         dataset_id=job_context['dataset'].id,
                         processor_job_id=job_context['job_id'],
                         num_input_files=job_context['num_input_files'])
        job_context['dataset'].success = False
        job_context['job'].failure_reason = "Failure reason: " + str(e)
        job_context['dataset'].failure_reason = "Failure reason: " + str(e)
        job_context['dataset'].save()
        # Delay failing this pipeline until the failure notify has been sent
        job_context['job'].success = False
        job_context['failure_reason'] = str(e)
        return job_context
    job_context['dataset'].success = True
    job_context['dataset'].save()
    logger.debug("Created smash output!",
                 archive_location=job_context["output_file"])
    log_state("end smash", job_context["job"].id, start_smash);
    return job_context
def _upload(job_context: Dict) -> Dict:
    """Uploads the result file to S3 and records its location on the dataset.

    Only uploads when ``job_context['upload']`` is truthy and we are
    running in the cloud; otherwise this is a no-op. On success the
    dataset's S3 bucket/key, size and sha1 are saved and the local
    archive is removed. Upload failures mark the job failed but the
    context is still returned so the failure notification can go out.
    """
    # There has been a failure already, don't try to upload anything.
    if not job_context.get("output_file", None):
        logger.error("Was told to upload a smash result without an output_file.",
                     job_id=job_context['job'].id)
        return job_context
    try:
        if job_context.get("upload", True) and settings.RUNNING_IN_CLOUD:
            s3_client = boto3.client('s3')
            # Note that file expiry is handled by the S3 object lifecycle,
            # managed by terraform.
            s3_client.upload_file(
                job_context["output_file"],
                RESULTS_BUCKET,
                job_context["output_file"].split('/')[-1],
                ExtraArgs={'ACL':'public-read'}
            )
            result_url = ("https://s3.amazonaws.com/" + RESULTS_BUCKET + "/" +
                          job_context["output_file"].split('/')[-1])
            job_context["result_url"] = result_url
            logger.debug("Result uploaded!",
                         result_url=job_context["result_url"]
                         )
            job_context["dataset"].s3_bucket = RESULTS_BUCKET
            job_context["dataset"].s3_key = job_context["output_file"].split('/')[-1]
            job_context["dataset"].size_in_bytes = calculate_file_size(job_context["output_file"])
            job_context["dataset"].sha1 = calculate_sha1(job_context["output_file"])
            job_context["dataset"].save()
            # File is uploaded, we can delete the local.
            try:
                os.remove(job_context["output_file"])
            except OSError:
                pass
    except Exception as e:
        logger.exception("Failed to upload smash result file.", file=job_context["output_file"])
        job_context['job'].success = False
        job_context['job'].failure_reason = "Failure reason: " + str(e)
        # Delay failing this pipeline until the failure notify has been sent
        # job_context['success'] = False
    return job_context
def _notify(job_context: Dict) -> Dict:
    """Use AWS SES to notify a user of a smash result.

    Skipped entirely when uploads are disabled or when not running in
    the cloud. On job failure a Slack notification is posted (best
    effort); if the dataset has an email address an email is sent and
    SES errors are escalated as ProcessorJobError. Once users have been
    notified, the job is marked no-retry.
    """
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context
    # Send a notification to slack when a dataset fails to be processed
    if job_context['job'].success is False:
        try:
            _notify_slack_failed_dataset(job_context)
        except Exception as e:
            logger.warn(e) # It doesn't really matter if this didn't work
    # Don't send an email if we don't have address.
    if job_context["dataset"].email_address:
        # Try to send the email.
        try:
            _notify_send_email(job_context)
        # Display an error if something goes wrong.
        except ClientError as e:
            raise utils.ProcessorJobError('ClientError while notifying',
                                          success=False,
                                          exc_info=1,
                                          client_error_message=e.response['Error']['Message'])
        except Exception as e:
            # NOTE(review): 'result_url' is only set by a successful _upload,
            # so this raise could itself KeyError on a failed run — verify.
            raise utils.ProcessorJobError('General failure when trying to send email.',
                                          success=False,
                                          exc_info=1,
                                          result_url=job_context['result_url'])
    # We don't want to retry this dataset after we send a notification to users
    # https://github.com/alexslemonade/refinebio/issues/1944
    job_context['job'].no_retry = True
    job_context['job'].save()
    return job_context
def _notify_slack_failed_dataset(job_context: Dict):
    """Send a slack notification when a dataset fails to smash.

    Posts directly to the configured webhook; any HTTP/timeout error
    propagates to the caller, which treats it as non-fatal.
    """
    # Link to the dataset page, where the user can re-try the download job
    dataset_url = 'https://www.refine.bio/dataset/' + str(job_context['dataset'].id)
    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json={
            'channel': 'ccdl-general', # Move to robots when we get sick of these
            'username': 'EngagementBot',
            'icon_emoji': ':halal:',
            'fallback': 'Dataset failed processing.',
            'title': 'Dataset failed processing',
            'title_link': dataset_url,
            'attachments':[
                {
                    'color': 'warning',
                    'text': job_context['job'].failure_reason,
                    'author_name': job_context['dataset'].email_address,
                    'fields': [
                        {
                            'title': 'Dataset id',
                            'value': str(job_context['dataset'].id)
                        }
                    ]
                }
            ]
        },
        headers={'Content-Type': 'application/json'},
        timeout=10
    )
def _notify_send_email(job_context):
    """Send email notification to the user if the dataset succeeded or failed.

    Builds either a success or a failure email (plain-text and HTML
    variants) and sends it via AWS SES. As a side effect, sets
    ``job_context['success'] = False`` on the failure path.
    """
    dataset_url = 'https://www.refine.bio/dataset/' + str(job_context['dataset'].id)
    SENDER = "Refine.bio Mail Robot <noreply@refine.bio>"
    RECIPIENT = job_context["dataset"].email_address
    AWS_REGION = "us-east-1"
    CHARSET = "UTF-8"
    if job_context['job'].success is False:
        SUBJECT = "There was a problem processing your refine.bio dataset :("
        BODY_TEXT = "We tried but were unable to process your requested dataset. Error was: \n\n" + str(job_context['job'].failure_reason) + "\nDataset ID: " + str(job_context['dataset'].id) + "\n We have been notified and are looking into the problem. \n\nSorry!"
        # Pre-filled title/body for the GitHub-issue and mailto links in the
        # error template; quote() makes them URL-safe.
        ERROR_EMAIL_TITLE = quote('I can\'t download my dataset')
        ERROR_EMAIL_BODY = quote("""
[What browser are you using?]
[Add details of the issue you are facing]
---
""" + str(job_context['dataset'].id))
        FORMATTED_HTML = BODY_ERROR_HTML.replace('REPLACE_DATASET_URL', dataset_url)\
            .replace('REPLACE_ERROR_TEXT', job_context['job'].failure_reason)\
            .replace('REPLACE_NEW_ISSUE', 'https://github.com/AlexsLemonade/refinebio/issues/new?title={0}&body={1}&labels=bug'.format(ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY))\
            .replace('REPLACE_MAILTO', 'mailto:ccdl@alexslemonade.org?subject={0}&body={1}'.format(ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY))
        job_context['success'] = False
    else:
        SUBJECT = "Your refine.bio Dataset is Ready!"
        BODY_TEXT = "Hot off the presses:\n\n" + dataset_url + "\n\nLove!,\nThe refine.bio Team"
        FORMATTED_HTML = BODY_HTML.replace('REPLACE_DOWNLOAD_URL', dataset_url)\
            .replace('REPLACE_DATASET_URL', dataset_url)
    # Create a new SES resource and specify a region.
    client = boto3.client('ses', region_name=AWS_REGION)
    #Provide the contents of the email.
    # NOTE(review): `response` is unused; SES errors surface as exceptions
    # handled by the caller (_notify).
    response = client.send_email(
        Destination={
            'ToAddresses': [
                RECIPIENT,
            ],
        },
        Message={
            'Body': {
                'Html': {
                    'Charset': CHARSET,
                    'Data': FORMATTED_HTML,
                },
                'Text': {
                    'Charset': CHARSET,
                    'Data': BODY_TEXT,
                },
            },
            'Subject': {
                'Charset': CHARSET,
                'Data': SUBJECT,
            }
        },
        Source=SENDER,
    )
def _update_result_objects(job_context: Dict) -> Dict:
    """Mark the dataset processed/available, set a one-week expiry, and flag success."""
    dataset = job_context["dataset"]
    for flag, value in (("is_processing", False),
                        ("is_processed", True),
                        ("is_available", True)):
        setattr(dataset, flag, value)
    dataset.expires_on = timezone.now() + timedelta(days=7)
    dataset.save()
    job_context["success"] = True
    return job_context
def smash(job_id: int, upload=True) -> None:
    """Main Smasher interface.

    Runs the smasher pipeline for the given processor job id: prepare
    input files, smash them, upload the archive, and close out the
    dataset. `_notify` is called outside of run_pipeline so users get
    an email whether processing failed or succeeded.
    """
    pipeline = Pipeline(name=PipelineEnum.SMASHER.value)
    job_context = utils.run_pipeline({"job_id": job_id,
                                      "upload": upload,
                                      "pipeline": pipeline},
                                     [utils.start_job,
                                      smashing_utils.prepare_files,
                                      _smash_all,
                                      _upload,
                                      _update_result_objects,
                                      utils.end_job])
    # ensure that `notify` is always called so that users get emails in case processing fails or succeeds
    job_context = _notify(job_context)
    return job_context
<|code_end|>
|
Transcriptome indices won't make both long and short versions
### Context
I'm pretty sure this is an issue that should affect prod. I saw it locally as part of #2035. I did get transcriptome indices made successfully, but on closer inspection only one of the two jobs (long and short) succeeded: after the first job finishes, the input files are deleted, so the second job no longer has the files it needs.
### Problem or idea
This was working at some point. I can't figure out exactly where things went wrong, but my guess is that we moved the file deletion into the `end_job` and didn't properly make sure to protect the transcriptome-specific files at least until the second job ran.
### Solution or next step
I think we need to add a special case to `processor.utils.end_job` to make sure that there are no other processor jobs associated with the file before deleting it.
| foreman/data_refinery_foreman/surveyor/transcriptome_index.py
<|code_start|>import re
import urllib
from abc import ABC
from django.utils import timezone
from typing import List, Dict
from data_refinery_common.job_lookup import ProcessorPipeline, Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
OriginalFile,
SurveyJobKeyValue,
)
from data_refinery_foreman.surveyor import utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor
logger = get_and_configure_logger(__name__)
# REST endpoint listing all species in the main Ensembl division.
MAIN_DIVISION_URL_TEMPLATE = "https://rest.ensembl.org/info/species?content-type=application/json"
# REST endpoint listing the species in a non-main Ensembl division.
DIVISION_URL_TEMPLATE = (
    "https://rest.ensembl.org/info/genomes/division/{division}" "?content-type=application/json"
)
# FTP locations of the transcriptome fasta and its matching GTF annotation.
TRANSCRIPTOME_URL_TEMPLATE = (
    "ftp://ftp.{url_root}/fasta/{species_sub_dir}/dna/"
    "{filename_species}.{assembly}.dna.{schema_type}.fa.gz"
)
GTF_URL_TEMPLATE = (
    "ftp://ftp.{url_root}/gtf/{species_sub_dir}/"
    "{filename_species}.{assembly}.{assembly_version}.gtf.gz"
)
# For whatever reason the division in the download URL is shortened in
# a way that doesn't seem to be discoverable programmatically. I've
# therefore created this lookup map:
DIVISION_LOOKUP = {
    "EnsemblPlants": "plants",
    "EnsemblFungi": "fungi",
    "EnsemblBacteria": "bacteria",
    "EnsemblProtists": "protists",
    "EnsemblMetazoa": "metazoa",
}
# Ensembl will periodically release updated versions of the
# assemblies. All divisions other than the main one have identical
# release versions. These urls will return what the most recent
# release version is.
MAIN_RELEASE_URL = "https://rest.ensembl.org/info/software?content-type=application/json"
DIVISION_RELEASE_URL = "https://rest.ensembl.org/info/eg_version?content-type=application/json"
class EnsemblUrlBuilder(ABC):
    """Generates URLs for different divisions of Ensembl.

    Each division of Ensembl has different conventions for its
    URLs. The logic contained in the init method of this base class is
    appropriate for most, but not all of the divisions. However, the
    logic contained in the build_* methods of this class is
    appropriate for all divisions.
    """

    def __init__(self, species: Dict):
        """Species is a Dict containing parsed JSON from the Division API."""
        self.url_root = "ensemblgenomes.org/pub/release-{assembly_version}/{short_division}"
        self.short_division = DIVISION_LOOKUP[species["division"]]
        self.assembly = species["assembly_name"].replace(" ", "_")
        # All non-main divisions share a single release version.
        self.assembly_version = (
            utils.requests_retry_session().get(DIVISION_RELEASE_URL).json()["version"]
        )
        self.species_sub_dir = species["name"]
        self.filename_species = species["name"].capitalize()
        # These fields aren't needed for the URL, but they vary between
        # the two REST APIs.
        self.scientific_name = species["name"].upper()
        self.taxonomy_id = species["taxonomy_id"]

    def build_transcriptome_url(self) -> str:
        """Return the FTP URL for this species' transcriptome fasta.

        Prefers the 'primary_assembly' file; falls back to 'toplevel'
        when opening the primary-assembly URL fails.
        """
        url_root = self.url_root.format(
            assembly_version=self.assembly_version, short_division=self.short_division
        )
        url = TRANSCRIPTOME_URL_TEMPLATE.format(
            url_root=url_root,
            species_sub_dir=self.species_sub_dir,
            filename_species=self.filename_species,
            assembly=self.assembly,
            schema_type="primary_assembly",
        )
        # If the primary_assembly is not available use toplevel instead.
        # NOTE(review): this relies on urllib.request being importable via the
        # bare "import urllib" at module top — consider importing
        # urllib.request explicitly.
        try:
            # Ancient unresolved bug. WTF python: https://bugs.python.org/issue27973
            urllib.request.urlcleanup()
            file_handle = urllib.request.urlopen(url)
            file_handle.close()
            urllib.request.urlcleanup()
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; any failure to open the URL triggers the fallback.
        except Exception:
            url = url.replace("primary_assembly", "toplevel")
        return url

    def build_gtf_url(self) -> str:
        """Return the FTP URL for this species' GTF annotation file."""
        url_root = self.url_root.format(
            assembly_version=self.assembly_version, short_division=self.short_division
        )
        return GTF_URL_TEMPLATE.format(
            url_root=url_root,
            species_sub_dir=self.species_sub_dir,
            filename_species=self.filename_species,
            assembly=self.assembly,
            assembly_version=self.assembly_version,
        )
class MainEnsemblUrlBuilder(EnsemblUrlBuilder):
    """URL builder for the division simply named "Ensembl" (the main division).

    It follows the same general URL pattern as the other divisions but
    with a different base URL structure, and its REST API returns the
    same information under slightly different key names, so every field
    is set here rather than via the base class.
    """

    def __init__(self, species: Dict):
        # The main division lives on ensembl.org and has no short
        # division component in its URL.
        name = species["name"]
        self.url_root = "ensembl.org/pub/release-{assembly_version}"
        self.short_division = None
        self.species_sub_dir = name
        self.filename_species = name.capitalize()
        self.assembly = species["assembly"]
        # Ask the REST API which release is current for the main division.
        self.assembly_version = (
            utils.requests_retry_session().get(MAIN_RELEASE_URL).json()["release"]
        )
        self.scientific_name = self.filename_species.replace("_", " ")
        self.taxonomy_id = species["taxon_id"]
class EnsemblProtistsUrlBuilder(EnsemblUrlBuilder):
    """Special logic specific to the EnsemblProtists division.

    EnsemblProtists is special because the first letter of the species
    name is always capitalized within the name of the file, instead of
    only when there's not a collection subnested.
    """

    def __init__(self, species: Dict):
        super().__init__(species)
        # Override the base class: use the capitalized "species" field
        # rather than the "name" field for the filename.
        self.filename_species = species["species"].capitalize()
class EnsemblFungiUrlBuilder(EnsemblProtistsUrlBuilder):
    """The EnsemblFungi URLs work the similarly to Protists division.

    EnsemblFungi is special because there is an assembly_name TIGR
    which needs to be corrected to CADRE for some reason.
    """

    def __init__(self, species: Dict):
        super().__init__(species)
        # Correct the one known-bad assembly name.
        if self.assembly == "TIGR":
            self.assembly = "CADRE"
def ensembl_url_builder_factory(species: Dict) -> EnsemblUrlBuilder:
    """Returns instance of EnsemblUrlBuilder or one of its subclasses.

    The class of the returned object is based on the species' division.
    """
    # Divisions with specialized URL conventions get dedicated builders;
    # everything else falls back to the generic EnsemblUrlBuilder.
    builder_class = {
        "EnsemblProtists": EnsemblProtistsUrlBuilder,
        "EnsemblFungi": EnsemblFungiUrlBuilder,
        "EnsemblVertebrates": MainEnsemblUrlBuilder,
    }.get(species["division"], EnsemblUrlBuilder)
    return builder_class(species)
class TranscriptomeIndexSurveyor(ExternalSourceSurveyor):
    """Surveys Ensembl divisions for species transcriptome files.

    For each species discovered via the Ensembl REST APIs this surveyor
    creates OriginalFile records for the species' FASTA and GTF files and
    queues downloader jobs for them.
    """

    def source_type(self):
        """Return the downloader type this surveyor feeds."""
        return Downloaders.TRANSCRIPTOME_INDEX.value

    def _clean_metadata(self, species: Dict) -> Dict:
        """Removes fields from metadata which shouldn't be stored.

        Also cast any None values to str so they can be stored in the
        database.

        These fields shouldn't be stored because:
        The taxonomy id is stored as fields on the Organism.
        Aliases and groups are lists we don't need.
        """
        species.pop("taxon_id") if "taxon_id" in species else None
        species.pop("taxonomy_id") if "taxonomy_id" in species else None
        species.pop("aliases") if "aliases" in species else None
        species.pop("groups") if "groups" in species else None

        # Cast to List since we're modifying the size of the dict
        # while iterating over it
        for k, v in list(species.items()):
            if v is None:
                species.pop(k)
            else:
                species[k] = str(v)

        return species

    def _generate_files(self, species: Dict) -> "List[OriginalFile]":
        """Create and save OriginalFiles for the species' FASTA and GTF URLs.

        NOTE(review): the original annotation said ``-> None`` but this
        method returns the list of created OriginalFiles.
        """
        url_builder = ensembl_url_builder_factory(species)
        fasta_download_url = url_builder.build_transcriptome_url()
        gtf_download_url = url_builder.build_gtf_url()

        # NOTE(review): platform_accession_code is assigned but never used
        # in this method; the pop also strips "division" from the metadata.
        platform_accession_code = species.pop("division")
        self._clean_metadata(species)

        all_new_files = []

        fasta_filename = url_builder.filename_species + ".fa.gz"
        original_file = OriginalFile()
        original_file.source_filename = fasta_filename
        original_file.source_url = fasta_download_url
        original_file.is_archive = True
        original_file.is_downloaded = False
        original_file.save()
        all_new_files.append(original_file)

        gtf_filename = url_builder.filename_species + ".gtf.gz"
        original_file = OriginalFile()
        original_file.source_filename = gtf_filename
        original_file.source_url = gtf_download_url
        original_file.is_archive = True
        original_file.is_downloaded = False
        original_file.save()
        all_new_files.append(original_file)

        return all_new_files

    def survey(self, source_type=None) -> bool:
        """
        Surveying here is a bit different than discovering an experiment
        and samples.

        Returns True when species were discovered and downloader jobs
        queued; False on any failure or a non-matching source_type.
        """
        if source_type != "TRANSCRIPTOME_INDEX":
            return False

        try:
            species_files = self.discover_species()
        except Exception:
            logger.exception(
                ("Exception caught while discovering species. " "Terminating survey job."),
                survey_job=self.survey_job.id,
            )
            return False

        try:
            for specie_file_list in species_files:
                self.queue_downloader_job_for_original_files(
                    specie_file_list, is_transcriptome=True
                )
        except Exception:
            logger.exception(
                ("Failed to queue downloader jobs. " "Terminating survey job."),
                survey_job=self.survey_job.id,
            )
            return False

        return True

    def discover_species(self):
        """Return a list of per-species OriginalFile lists.

        The division to survey (and optionally a single organism_name) is
        read from this survey job's SurveyJobKeyValue records.
        """
        ensembl_division = SurveyJobKeyValue.objects.get(
            survey_job_id=self.survey_job.id, key__exact="ensembl_division"
        ).value

        logger.info(
            "Surveying %s division of ensembl.", ensembl_division, survey_job=self.survey_job.id
        )

        # The main division has a different base URL for its REST API.
        if ensembl_division == "Ensembl":
            r = utils.requests_retry_session().get(MAIN_DIVISION_URL_TEMPLATE)

            # Yes I'm aware that specieses isn't a word. However I need to
            # distinguish between a singlular species and multiple species.
            specieses = r.json()["species"]
        else:
            r = utils.requests_retry_session().get(
                DIVISION_URL_TEMPLATE.format(division=ensembl_division)
            )
            specieses = r.json()

        try:
            organism_name = SurveyJobKeyValue.objects.get(
                survey_job_id=self.survey_job.id, key__exact="organism_name"
            ).value
            organism_name = organism_name.lower().replace(" ", "_")
        except SurveyJobKeyValue.DoesNotExist:
            organism_name = None

        all_new_species = []
        if organism_name:
            for species in specieses:
                # This key varies based on whether the division is the
                # main one or not... why couldn't they just make them
                # consistent?
                if ("species" in species and species["species"] == organism_name) or (
                    "name" in species and species["name"] == organism_name
                ):
                    all_new_species.append(self._generate_files(species))
                    break
        else:
            for species in specieses:
                all_new_species.append(self._generate_files(species))

        if len(all_new_species) == 0:
            logger.error(
                "Unable to find any species!",
                ensembl_division=ensembl_division,
                organism_name=organism_name,
            )

        return all_new_species
<|code_end|>
workers/data_refinery_workers/processors/utils.py
<|code_start|>import os
import random
import shutil
import signal
import string
import subprocess
import sys
import yaml
import pickle
from django.conf import settings
from django.utils import timezone
from typing import List, Dict, Callable
from data_refinery_common.job_lookup import ProcessorEnum, ProcessorPipeline, SMASHER_JOB_TYPES
from data_refinery_common.job_management import create_downloader_job
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
Dataset,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
OriginalFile,
OriginalFileSampleAssociation,
Pipeline,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
Sample,
)
from data_refinery_common.utils import (
get_env_variable,
get_env_variable_gracefully,
get_instance_id,
)
logger = get_and_configure_logger(__name__)
# Let this fail if SYSTEM_VERSION is unset.
SYSTEM_VERSION = get_env_variable("SYSTEM_VERSION")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
S3_QN_TARGET_BUCKET_NAME = get_env_variable("S3_QN_TARGET_BUCKET_NAME", "data-refinery")
DIRNAME = os.path.dirname(os.path.abspath(__file__))
CURRENT_JOB = None
def signal_handler(sig, frame):
    """Signal Handler, works for both SIGTERM and SIGINT.

    Marks the in-flight job (if any) as unsuccessful before exiting so
    the database reflects the interruption, and refunds the retry that
    this attempt consumed.
    """
    global CURRENT_JOB
    if CURRENT_JOB:
        CURRENT_JOB.success = False
        CURRENT_JOB.end_time = timezone.now()
        # This attempt should not count against the retry budget.
        CURRENT_JOB.num_retries = CURRENT_JOB.num_retries - 1
        # Fixed typo: the original stored "Interruped".
        CURRENT_JOB.failure_reason = "Interrupted by SIGTERM/SIGINT: " + str(sig)
        CURRENT_JOB.save()
    sys.exit(0)
def prepare_original_files(job_context):
    """ Provision in the Job context for OriginalFile-driven processors.

    Adds 'original_files', 'samples', and 'computed_files' to job_context.
    Raises ProcessorJobError if no files exist or any are missing on disk
    (after queueing replacement downloader jobs for the missing ones).
    """
    job = job_context["job"]
    original_files = job.original_files.all()

    if original_files.count() == 0:
        raise ProcessorJobError("No files were found for the job.", success=False)

    # Verify everything the job needs is actually on disk; collect any
    # files that still need downloading.
    undownloaded_files = set()
    for original_file in original_files:
        if original_file.needs_downloading(job_context["job_id"]):
            if original_file.is_downloaded:
                # If it needs to be downloaded then it's not
                # downloaded and the is_downloaded field should stop
                # lying about that.
                original_file.is_downloaded = False
                original_file.save()

            undownloaded_files.add(original_file)

    if undownloaded_files:
        logger.info(
            (
                "One or more files found which were missing or not downloaded."
                " Creating downloader jobs for them and deleting this job."
            ),
            processor_job=job.id,
            missing_files=list(undownloaded_files),
        )

        was_job_created = create_downloader_job(
            undownloaded_files, processor_job_id=job_context["job_id"], force=True
        )
        if not was_job_created:
            raise ProcessorJobError(
                "Missing file for processor job but unable to recreate downloader jobs!",
                success=False,
            )

        raise ProcessorJobError(
            "We can not process the data because it is not on the disk",
            success=False,
            no_retry=True,  # this job should not be retried again
            abort=True,  # abort the job and don't do anything else
            undownloaded_files=[file.id for file in undownloaded_files],
        )

    job_context["original_files"] = original_files
    first_original_file = original_files.first()
    # NOTE(review): samples are derived from the first original file only —
    # presumably all of the job's files map to the same samples; confirm.
    samples = Sample.objects.filter(original_files=first_original_file)
    job_context["samples"] = samples
    job_context["computed_files"] = []

    return job_context
def prepare_dataset(job_context):
    """ Provision in the Job context for Dataset-driven processors.

    Adds 'dataset', 'samples', 'experiments', and empty 'original_files'/
    'computed_files' to job_context. Raises ProcessorJobError when the job
    has zero or more than one dataset.
    """
    job = job_context["job"]
    job_datasets = job.datasets.all()

    # This should never be more than one!
    if job_datasets.count() > 1:
        raise ProcessorJobError(
            "More than one dataset for processor job!", success=False, no_retry=True
        )
    elif job_datasets.count() == 0:
        raise ProcessorJobError(
            "No datasets found for processor job!", success=False, no_retry=True
        )

    dataset = job_datasets.first()
    dataset.is_processing = True
    dataset.save()

    # Get the samples to smash
    job_context["dataset"] = dataset
    job_context["samples"] = dataset.get_aggregated_samples()
    job_context["experiments"] = dataset.get_experiments()

    # Just in case
    job_context["original_files"] = []
    job_context["computed_files"] = []
    return job_context
def start_job(job_context: Dict):
    """A processor function to start jobs.

    Record in the database that this job is being started and
    retrieves the job's batches from the database and adds them to the
    dictionary passed in with the key 'batches'.
    """
    job = job_context["job"]

    # If the sample already has a good computed file there is nothing to do
    # (tximport jobs are exempt from this shortcut).
    original_file = job.original_files.first()
    if (
        not job.pipeline_applied == ProcessorPipeline.TXIMPORT.value
        and original_file
        and not original_file.needs_processing(job_context["job_id"])
    ):
        failure_reason = (
            "Sample has a good computed file, it must have been processed, "
            "so it doesn't need to be downloaded! Aborting!"
        )
        logger.error(failure_reason, job_id=job.id, original_file=original_file)
        job_context["original_files"] = []
        job_context["computed_files"] = []
        job_context["abort"] = True
        # Will be saved by end_job.
        job_context["job"].failure_reason = failure_reason
        return job_context

    # Set up the SIGTERM handler so we can appropriately handle being interrupted.
    # (`docker stop` uses SIGTERM, not SIGINT.)
    # (however, Nomad sends an SIGINT so catch both.)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # This job should not have been started, for some reason Nomad restarts some of our jobs
    # https://github.com/AlexsLemonade/refinebio/issues/1487
    if job.start_time is not None and settings.RUNNING_IN_CLOUD:
        # Let's just log the event and let the job run instead of failing
        # and also reset the endtime and failure reason, since those fields might have been set
        logger.warn(
            "ProcessorJob was restarted by Nomad. We do not know why this happened",
            processor_job=job.id,
            success=job.success,
            failure_reason=job.failure_reason,
            start_time=job.start_time,
            end_time=job.end_time,
        )
        job.end_time = None
        job.failure_reason = None

    # Record which worker picked the job up and when.
    job.worker_id = get_instance_id()
    job.worker_version = SYSTEM_VERSION
    job.start_time = timezone.now()
    job.save()

    # Remember the job so signal_handler can mark it failed on interruption.
    global CURRENT_JOB
    CURRENT_JOB = job

    logger.debug("Starting processor Job.", processor_job=job.id, pipeline=job.pipeline_applied)

    # Janitor jobs don't operate on file objects.
    # Tximport jobs don't need to download the original file, they
    # just need it to know what experiment to process.
    if job.pipeline_applied not in [
        ProcessorPipeline.JANITOR.value,
        ProcessorPipeline.TXIMPORT.value,
    ]:
        # Some jobs take OriginalFiles, other take Datasets
        if ProcessorPipeline[job.pipeline_applied] not in SMASHER_JOB_TYPES:
            job_context = prepare_original_files(job_context)
            if not job_context.get("success", True):
                return job_context
        else:
            job_context = prepare_dataset(job_context)
            if not job_context.get("success", True):
                return job_context
    else:
        # Just in case
        job_context["original_files"] = []
        job_context["computed_files"] = []

    return job_context
def end_job(job_context: Dict, abort=False):
    """A processor function to end jobs.

    Record in the database that this job has completed and that
    the samples have been processed if not aborted.

    Uploads computed files to S3, marks samples/experiments processed on
    success, cleans up local files, and saves the final job state.
    """
    job = job_context["job"]
    success = job_context.get("success", True)

    # Upload first so if this fails we can set success = False and let
    # the rest of the function mark it as failed.
    if success:
        # QN reference files go to a special bucket so they can be
        # publicly available.
        if job_context["job"].pipeline_applied == "QN_REFERENCE":
            s3_bucket = S3_QN_TARGET_BUCKET_NAME
        else:
            s3_bucket = S3_BUCKET_NAME

        # S3-sync Computed Files
        for computed_file in job_context.get("computed_files", []):
            # Ensure even distribution across S3 servers
            nonce = "".join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(24)
            )
            result = computed_file.sync_to_s3(s3_bucket, nonce + "_" + computed_file.filename)
            if result and settings.RUNNING_IN_CLOUD:
                computed_file.delete_local_file()
            elif not result:
                success = False
                job_context["success"] = False
                job.failure_reason = "Failed to upload computed file."
                break

    if not success:
        # Roll back: remove computed files locally and from the database.
        for computed_file in job_context.get("computed_files", []):
            computed_file.delete_local_file()
            if computed_file.id:
                computed_file.delete()

        # if the processor job fails mark all datasets as failed
        if ProcessorPipeline[job.pipeline_applied] in SMASHER_JOB_TYPES:
            for dataset in job.datasets.all():
                dataset.failure_reason = job.failure_reason
                dataset.is_processing = False
                dataset.save()

    if not abort:
        # Smasher-type and janitor pipelines don't mark samples processed.
        if job_context.get("success", False) and not (
            job_context["job"].pipeline_applied
            in [
                ProcessorPipeline.SMASHER.value,
                ProcessorPipeline.QN_REFERENCE.value,
                ProcessorPipeline.CREATE_COMPENDIA.value,
                ProcessorPipeline.CREATE_QUANTPENDIA.value,
                ProcessorPipeline.JANITOR.value,
            ]
        ):
            # Salmon requires the final `tximport` step to be fully `is_processed`.
            mark_as_processed = True
            if job_context["job"].pipeline_applied == "SALMON" and not job_context.get(
                "tximported", False
            ):
                mark_as_processed = False

            if mark_as_processed:
                # This handles most of our cases
                unique_experiments = []
                for sample in job_context.get("samples", []):
                    sample.is_processed = True
                    sample.save()
                    if sample.experiments.all().count() > 0:
                        unique_experiments = list(
                            set(unique_experiments + sample.experiments.all()[::1])
                        )

                # Explicitly for the single-salmon scenario
                if "sample" in job_context:
                    sample = job_context["sample"]
                    sample.is_processed = True
                    sample.save()

                for experiment in unique_experiments:
                    experiment.update_num_samples()

    # If we are aborting, it's because we want to do something
    # different, so leave the original files so that "something
    # different" can use them.
    if (success or job.no_retry) and not abort:
        # Cleanup Original Files
        if "original_files" in job_context:
            for original_file in job_context["original_files"]:
                original_file.delete_local_file()

    # If the pipeline includes any steps, save it.
    if "pipeline" in job_context:
        pipeline = job_context["pipeline"]
        if len(pipeline.steps):
            pipeline.save()

    # Compendia jobs are explicitly excluded from work_dir cleanup here.
    if (
        "work_dir" in job_context
        and job_context["job"].pipeline_applied != ProcessorPipeline.CREATE_COMPENDIA.value
        and settings.RUNNING_IN_CLOUD
    ):
        shutil.rmtree(job_context["work_dir"], ignore_errors=True)

    job.abort = abort
    job.success = success
    job.end_time = timezone.now()
    job.save()

    if success:
        logger.debug(
            "Processor job completed successfully.",
            processor_job=job.id,
            pipeline_applied=job.pipeline_applied,
        )
    else:
        if not job.failure_reason:
            logger.error(
                "Processor job failed without having failure_reason set. FIX ME!!!!!!!!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
            )
        else:
            logger.error(
                "Processor job failed!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
                failure_reason=job.failure_reason,
            )

    # Return Final Job context so testers can check it
    return job_context
def run_pipeline(start_value: Dict, pipeline: List[Callable]):
    """Runs a pipeline of processor functions.

    start_value must contain a key 'job_id' which is a valid id for a
    ProcessorJob record.

    Each processor function must accept a dictionary and return a
    dictionary.

    Any processor function which returns a dictionary containing a key
    of 'success' with a value of False will cause the pipeline to
    terminate with a call to utils.end_job.

    The key 'job' is reserved for the ProcessorJob currently being
    run. It is required that the dictionary returned by each
    processor function preserve the mapping for 'job' that was passed
    into it.
    """
    job_id = start_value["job_id"]
    try:
        job = ProcessorJob.objects.get(id=job_id)
    except ProcessorJob.DoesNotExist:
        logger.error("Cannot find processor job record.", processor_job=job_id)
        return

    if len(pipeline) == 0:
        logger.error("Empty pipeline specified.", procesor_job=job_id)

    last_result = start_value
    last_result["job"] = job
    for processor in pipeline:
        try:
            last_result = processor(last_result)
        except ProcessorJobError as e:
            # ProcessorJobError carries job-field updates plus extra
            # keyword context used for structured logging.
            e.update_job(job)
            logger.exception(e.failure_reason, processor_job=job.id, **e.context)
            if e.success is False:
                # end_job will use this and set the value
                last_result["success"] = False
            return end_job(last_result, abort=bool(e.abort))
        except Exception as e:
            failure_reason = (
                "Unhandled exception caught while running processor" " function {} in pipeline: "
            ).format(processor.__name__)
            logger.exception(failure_reason, no_retry=job.no_retry, processor_job=job_id)
            last_result["success"] = False
            last_result["job"].failure_reason = failure_reason + str(e)
            return end_job(last_result)

        if "success" in last_result and last_result["success"] is False:
            logger.error(
                "Processor function %s failed. Terminating pipeline.",
                processor.__name__,
                processor_job=job_id,
                failure_reason=last_result["job"].failure_reason,
            )
            return end_job(last_result)

        if last_result.get("abort", False):
            return end_job(last_result, abort=True)

    return last_result
class ProcessorJobError(Exception):
    """Raised by pipeline steps to fail a processor job with context.

    Carries optional flags mirroring ProcessorJob fields plus arbitrary
    keyword context for structured logging at the catch site.
    """

    def __init__(
        self, failure_reason, *, success=None, no_retry=None, retried=None, abort=None, **context
    ):
        super(ProcessorJobError, self).__init__(failure_reason)
        self.failure_reason = failure_reason
        self.success = success
        self.no_retry = no_retry
        self.retried = retried
        self.abort = abort
        # additional context to be included when logging
        self.context = context

    def update_job(self, job):
        """Copy this error's fields onto the job and its datasets, then save."""
        job.failure_reason = self.failure_reason
        # Only overwrite job flags that were explicitly provided.
        for flag in ("success", "no_retry", "retried", "abort"):
            value = getattr(self, flag)
            if value is not None:
                setattr(job, flag, value)
        job.save()

        # also update the failure reason if this is a dataset's processor job
        for dataset in job.datasets.all():
            dataset.failure_reason = self.failure_reason
            dataset.success = False
            dataset.save()
def get_os_distro():
    """Returns a string of OS distribution.

    Since we are using Docker, this function only considers Linux distribution.
    Alternative files on Linux: /etc/os-release, /etc/lsb-release
    As a matter of fact, "/etc/issue" doesn't exist on Mac OS X. We can use
    "sw_vers" command to find its OS information.
    A more cross-platform solution is using "platform" module in Python.
    """
    # The strip argument is the character set {'\\', 'l', 'n', '\n', ' '},
    # which removes the " \n \l" getty banner around the name in /etc/issue.
    with open("/etc/issue") as distro_fh:
        return distro_fh.readline().strip("\l\n\\n ")
def get_os_pkgs(pkg_list):
    """Returns a dictionary in which each key is the name of an os-level
    package and the corresponding value is the package's version.

    This function assumes the package manager is Debian-based (dpkg/apt).
    Raises an Exception when any package is not installed.
    """
    pkg_info = dict()
    for pkg in pkg_list:
        process_done = subprocess.run(
            ["dpkg-query", "--show", pkg], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if process_done.returncode:
            raise Exception(
                "OS-level package %s not found: %s" % (pkg, process_done.stderr.decode().strip())
            )

        # dpkg-query prints "<name>\t<version>"; keep the version field.
        version = process_done.stdout.decode().strip().split("\t")[-1]
        pkg_info[pkg] = version

    return pkg_info
def get_cmd_lines(cmd_list):
    """Map each command string in cmd_list to its stripped console output.

    Raises an Exception when any command exits with a nonzero status.
    """
    cmd_info = dict()
    for cmd in cmd_list:
        completed = subprocess.run(
            cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if completed.returncode:
            raise Exception(
                "Failed to run command line '%s': %s" % (cmd, completed.stderr.decode().strip())
            )

        # Workaround for the "salmontools --version"
        # command, whose outputs are sent to stderr instead of stdout.
        # Alternatively, we could have used "stderr=subprocess.STDOUT" when
        # launching the process, but it is probably a better idea to
        # keep stdout and stderr separate.
        if cmd.strip().split()[0] == "salmontools":
            output_bytes = completed.stderr
        else:
            output_bytes = completed.stdout

        cmd_info[cmd] = output_bytes.decode().strip()

    return cmd_info
def get_pip_pkgs(pkg_list):
    """Returns a dictionary in which each key is the name of a pip-installed
    package and the corresponding value is the package's version.

    Instead of using: `pip show pkg | grep Version | awk '{print $2}'` to get
    each package's version, we save the output of `pip freeze` first, then
    check the version of each input package in pkg_list. This approach
    launches the subprocess only once and (hopefully) saves some computational
    resource.

    Raises an Exception when `pip freeze` fails or a package is missing.
    """
    process_done = subprocess.run(["pip", "freeze"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process_done.returncode:
        raise Exception("'pip freeze' failed: %s" % process_done.stderr.decode().strip())

    frozen_pkgs = dict()
    # NOTE(review): assumes every `pip freeze` line is "name==version";
    # editable ("-e ...") or VCS requirement lines would break this split.
    for item in process_done.stdout.decode().split():
        name, version = item.split("==")
        frozen_pkgs[name] = version

    pkg_info = dict()
    for pkg in pkg_list:
        try:
            version = frozen_pkgs[pkg]
        except KeyError:
            raise Exception("Pip package not found: %s" % pkg)
        pkg_info[pkg] = version

    return pkg_info
def get_bioc_version():
    """Returns a string that is the version of "Bioconductor" package in R.

    Note that the data frame returned by installed.packages() does NOT include
    a package named "Bioconductor", so we have to launch another R command to
    find "Bioconductor" version.

    Raises an Exception when Rscript fails or no version is found.
    """
    r_command = "tools:::.BioC_version_associated_with_R_version()"
    process_done = subprocess.run(
        ["Rscript", "-e", r_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if process_done.returncode:
        raise Exception(
            "R command failed to retrieve Bioconductor version: %s"
            % process_done.stderr.decode().strip()
        )

    version = process_done.stdout.decode().strip().split()[-1]
    version = version[1:-1]  # Remove the leading and trailing non-ascii characters.

    if len(version) == 0:
        raise Exception("Bioconductor not found")

    return version
def get_r_pkgs(pkg_list):
    """Returns a dictionary in which each key is the name of a R package
    and the corresponding value is the package's version.

    Raises an Exception when Rscript fails or a package is not installed.
    """
    # Use "Rscript -e <R_commands>" command to get all user-installed R packages.
    r_commands = "packages.df <- as.data.frame(installed.packages()[, c(1, 3:4)]); \
                  packages.df <- packages.df[is.na(packages.df$Priority), 1:2, drop=FALSE]; \
                  colnames(packages.df) <- NULL; \
                  print(packages.df, row.names=FALSE);"

    process_done = subprocess.run(
        ["Rscript", "-e", r_commands], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if process_done.returncode:
        raise Exception(
            "R command failed to retrieves installed packages: %s"
            % process_done.stderr.decode().strip()
        )

    # Each output line holds one "name version" pair.
    r_pkgs = dict()
    for item in process_done.stdout.decode().strip().split("\n"):
        name, version = item.strip().split()
        r_pkgs[name] = version

    # "Brainarray" is a collection that consists of 121 ".*ensgprobe" packages.
    # They share the same version number, so we use 'hgu133plus2hsensgprobe'
    # package to report this uniform version.
    ba_proxy_pkg = "hgu133plus2hsensgprobe"

    pkg_info = dict()
    for pkg in pkg_list:
        if pkg == "Bioconductor":
            version = get_bioc_version()
        else:
            try:
                version = r_pkgs[pkg] if pkg != "Brainarray" else r_pkgs[ba_proxy_pkg]
            except KeyError:
                raise Exception("R package not found: %s" % pkg)
        pkg_info[pkg] = version

    return pkg_info
def get_checksums(filenames_list):
    """Returns a dictionary in which each key is a file's name and the
    corresponding value is the file's md5 checksum.

    Filenames are resolved relative to this module's directory (DIRNAME).
    Raises an Exception when `md5sum` fails for any file.
    """
    checksums = dict()
    for filename in filenames_list:
        abs_filepath = os.path.join(DIRNAME, filename)
        process_done = subprocess.run(
            ["md5sum", abs_filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if process_done.returncode:
            raise Exception("md5sum command error:", process_done.stderr.decode().strip())

        # md5sum prints "<digest>  <path>"; keep only the digest.
        checksum_str = process_done.stdout.decode().strip().split()[0]
        checksums[filename] = checksum_str

    return checksums
def get_runtime_env(yml_filename):
    """Reads input YAML filename and returns a dictionary in which each key
    is a category name of runtime environment and the corresponding value
    is an object that includes version information of packages listed in
    that category.

    Raises an Exception on any unrecognized category name.
    """
    runtime_env = dict()
    with open(yml_filename) as yml_fh:
        # safe_load avoids arbitrary Python object construction from the
        # YAML file (yaml.load without an explicit Loader is unsafe and
        # deprecated since PyYAML 5.1).
        pkgs = yaml.safe_load(yml_fh)
    for pkg_type, pkg_list in pkgs.items():
        if pkg_type == "os_distribution":
            value = get_os_distro()
        elif pkg_type == "os_pkg":
            value = get_os_pkgs(pkg_list)
        elif pkg_type == "cmd_line":
            value = get_cmd_lines(pkg_list)
        elif pkg_type == "python":
            value = get_pip_pkgs(pkg_list)
        elif pkg_type == "R":
            value = get_r_pkgs(pkg_list)
        elif pkg_type == "checksum":
            value = get_checksums(pkg_list)
        else:
            raise Exception("Unknown category in %s: %s" % (yml_filename, pkg_type))

        runtime_env[pkg_type] = value

    return runtime_env
def find_processor(enum_key):
    """Returns either a newly created Processor record, or the one in
    database that matches the current processor name, version and environment.
    """
    name = ProcessorEnum[enum_key].value["name"]
    docker_image = ProcessorEnum[enum_key].value["docker_img"]

    # In current implementation, ALWAYS get the runtime environment.
    yml_path = os.path.join(DIRNAME, ProcessorEnum[enum_key].value["yml_file"])
    environment = get_runtime_env(yml_path)
    obj, status = Processor.objects.get_or_create(
        name=name, version=SYSTEM_VERSION, docker_image=docker_image, environment=environment
    )
    return obj
def handle_processor_exception(job_context, processor_key, ex):
    """Record a processor-setup failure on the job and mark the context failed."""
    message = "Failed to set processor: %s" % ex
    logger.error(message, job_id=job_context["job"].id, processor=processor_key)
    job_context["job"].failure_reason = message
    job_context["success"] = False
    return job_context
def cache_keys(*keys, work_dir_key="work_dir"):
    """ Decorator to be applied to a pipeline function.
    Returns a new function that calls the original one and caches the given
    keys into the `work_dir`. On the next call it will load those keys (if they
    exist) and add them to the job_context instead of executing the function. """

    def inner(func):
        # generate a unique name for the cache based on the pipeline name
        # and the cached keys
        cache_name = "__".join(list(keys) + [func.__name__])

        def pipeline(job_context):
            cache_path = os.path.join(job_context[work_dir_key], cache_name)
            if os.path.exists(cache_path):
                # cached values exist, load keys from cache
                try:
                    # Use a context manager so the file handle is closed
                    # deterministically (the original leaked it via an
                    # inline open()).
                    with open(cache_path, "rb") as cache_file:
                        values = pickle.load(cache_file)
                    return {**job_context, **values}
                except Exception:
                    # Catch Exception, not a bare `except:`, so
                    # KeyboardInterrupt/SystemExit still propagate. A bad
                    # cache is non-fatal; fall through and recompute.
                    logger.warning(
                        "Failed to load cached data for pipeline function.",
                        function_name=func.__name__,
                        keys=keys,
                    )

            # execute the actual function
            job_context = func(job_context)

            try:
                # save cached data for the next run
                values = {key: job_context[key] for key in keys}
                with open(cache_path, "wb") as cache_file:
                    pickle.dump(values, cache_file)
            except Exception:
                # don't fail the job if we can't save the cache
                logger.warning(
                    "Failed to cache data for pipeline function.",
                    function_name=func.__name__,
                    keys=keys,
                )

            return job_context

        return pipeline

    return inner
<|code_end|>
| foreman/data_refinery_foreman/surveyor/transcriptome_index.py
<|code_start|>import csv
import re
import shutil
import urllib
from abc import ABC
from typing import List, Dict
from data_refinery_common.job_lookup import Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
OriginalFile,
SurveyJobKeyValue,
)
from data_refinery_foreman.surveyor import utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor
logger = get_and_configure_logger(__name__)
CHUNK_SIZE = 1024 * 256 # chunk_size is in bytes
MAIN_DIVISION_URL_TEMPLATE = "https://rest.ensembl.org/info/species?content-type=application/json"
DIVISION_URL_TEMPLATE = (
"https://rest.ensembl.org/info/genomes/division/{division}?content-type=application/json"
)
SPECIES_DETAIL_URL_TEMPLATE = (
"ftp://ftp.ensemblgenomes.org/pub/{short_division}/current/species_{division}.txt"
)
TRANSCRIPTOME_URL_TEMPLATE = (
"ftp://ftp.{url_root}/fasta/{collection}{species_sub_dir}/dna/"
"{filename_species}.{assembly}.dna.{schema_type}.fa.gz"
)
GTF_URL_TEMPLATE = (
"ftp://ftp.{url_root}/gtf/{collection}{species_sub_dir}/"
"{filename_species}.{assembly}.{assembly_version}.gtf.gz"
)
# For whatever reason the division in the download URL is shortened in
# a way that doesn't seem to be discoverable programmatically. I've
# therefore created this lookup map:
DIVISION_LOOKUP = {
"EnsemblPlants": "plants",
"EnsemblFungi": "fungi",
"EnsemblBacteria": "bacteria",
"EnsemblProtists": "protists",
"EnsemblMetazoa": "metazoa",
}
# Ensembl will periodically release updated versions of the
# assemblies. All divisions other than the main one have identical
# release versions. These urls will return what the most recent
# release version is.
MAIN_RELEASE_URL = "https://rest.ensembl.org/info/software?content-type=application/json"
DIVISION_RELEASE_URL = "https://rest.ensembl.org/info/eg_version?content-type=application/json"
def get_strain_mapping_for_organism(
    species_name: str, config_file="config/organism_strain_mapping.csv"
) -> Dict:
    """Return the strain/organism mapping row for species_name, or None.

    Matching against the "organism" column is case-insensitive
    (species_name is upper-cased first).

    NOTE: the original annotation claimed ``List[Dict]``, but a single
    csv.DictReader row (or None when nothing matches) is returned.
    """
    upper_name = species_name.upper()
    with open(config_file) as csvfile:
        for row in csv.DictReader(csvfile):
            if row["organism"] == upper_name:
                return row
    return None
def get_species_detail_by_assembly(assembly: str, division: str) -> Dict:
    """Returns additional detail about a species given an assembly and a division.

    These details are necessary because the FTP directory for
    EnsemblBacteria and EnsemblFungi have an additional level in their
    paths that can only be determined by parsing this file. I found
    this out via the Ensembl dev mailing list.

    Returns the matching TSV row as a dict, or None (implicitly) when no
    row matches the assembly. (The original ``-> str`` annotation was
    inaccurate.)
    """
    bacteria_species_detail_url = SPECIES_DETAIL_URL_TEMPLATE.format(
        short_division=DIVISION_LOOKUP[division], division=division
    )

    urllib.request.urlcleanup()

    # NOTE(review): the downloaded TSV is left on disk after parsing —
    # confirm whether callers rely on it or it should be cleaned up.
    collection_path = assembly + "_collection.tsv"
    with open(collection_path, "wb") as collection_file:
        with urllib.request.urlopen(bacteria_species_detail_url) as request:
            shutil.copyfileobj(request, collection_file, CHUNK_SIZE)

    # Ancient unresolved bug. WTF python: https://bugs.python.org/issue27973
    urllib.request.urlcleanup()

    with open(collection_path) as csvfile:
        reader = csv.DictReader(csvfile, delimiter="\t")
        for row in reader:
            if row["assembly"] == assembly:
                return row
class EnsemblUrlBuilder(ABC):
    """Generates URLs for different divisions of Ensembl.

    Each division of Ensembl has different conventions for its
    URLs. The logic contained in the init method of this base class is
    appropriate for most, but not all of the divisions. However, the
    logic contained in the build_* methods of this class is
    appropriate for all divisions.
    """

    def __init__(self, species: Dict):
        """Species is a Dict containing parsed JSON from the Division API."""
        self.url_root = "ensemblgenomes.org/pub/release-{assembly_version}/{short_division}"
        self.division = species["division"]
        self.short_division = DIVISION_LOOKUP[species["division"]]

        # A strain mapping row (when present) overrides the assembly name
        # reported by the REST API.
        mapping = get_strain_mapping_for_organism(species["name"])
        if mapping:
            self.assembly = mapping["assembly"]
            self.strain = mapping["strain"]
        else:
            self.assembly = species["assembly_name"].replace(" ", "_")
            self.strain = None

        assembly_response = utils.requests_retry_session().get(DIVISION_RELEASE_URL)
        self.assembly_version = assembly_response.json()["version"]
        self.species_sub_dir = species["name"]
        self.filename_species = species["name"].capitalize()

        # These fields aren't needed for the URL, but they vary between
        # the two REST APIs.
        self.scientific_name = species["name"].upper()
        self.taxonomy_id = species["taxonomy_id"]

        # This field is only needed for EnsemblBacteria and EnsemblFungi.
        self.collection = ""

    def build_transcriptome_url(self) -> str:
        """Return the FTP URL of the species' transcriptome FASTA file.

        Prefers the "primary_assembly" file and falls back to "toplevel"
        when the former cannot be opened on the FTP server.
        """
        url_root = self.url_root.format(
            assembly_version=self.assembly_version, short_division=self.short_division
        )
        url = TRANSCRIPTOME_URL_TEMPLATE.format(
            url_root=url_root,
            species_sub_dir=self.species_sub_dir,
            collection=self.collection,
            filename_species=self.filename_species,
            assembly=self.assembly,
            schema_type="primary_assembly",
        )

        # If the primary_assembly is not available use toplevel instead.
        try:
            # Ancient unresolved bug. WTF python: https://bugs.python.org/issue27973
            urllib.request.urlcleanup()
            file_handle = urllib.request.urlopen(url)
            file_handle.close()
            urllib.request.urlcleanup()
        except Exception:
            url = url.replace("primary_assembly", "toplevel")

        return url

    def build_gtf_url(self) -> str:
        """Return the FTP URL of the species' GTF annotation file."""
        url_root = self.url_root.format(
            assembly_version=self.assembly_version, short_division=self.short_division
        )
        return GTF_URL_TEMPLATE.format(
            url_root=url_root,
            species_sub_dir=self.species_sub_dir,
            collection=self.collection,
            filename_species=self.filename_species,
            assembly=self.assembly,
            assembly_version=self.assembly_version,
        )
class MainEnsemblUrlBuilder(EnsemblUrlBuilder):
    """Special logic specific to the main Ensembl division.

    One Ensembl division is just called "Ensembl"; we refer to it as the
    main division. Its URLs follow the same general pattern as the other
    divisions but with a different base URL structure, and its REST API
    returns similar data under slightly different key names.
    """

    def __init__(self, species: Dict):
        # Note: deliberately does not call super().__init__ because the
        # main division uses different API keys and a different URL root.
        self.url_root = "ensembl.org/pub/release-{assembly_version}"
        self.short_division = None
        self.collection = ""

        species_name = species["name"]
        self.species_sub_dir = species_name
        self.filename_species = species_name.capitalize()
        self.scientific_name = self.filename_species.replace("_", " ")

        self.assembly = species["assembly"]
        release_info = utils.requests_retry_session().get(MAIN_RELEASE_URL).json()
        self.assembly_version = release_info["release"]
        self.taxonomy_id = species["taxon_id"]
class EnsemblProtistsUrlBuilder(EnsemblUrlBuilder):
    """Special logic specific to the EnsemblProtists division.

    EnsemblProtists is special because the first letter of the species
    name is always capitalized within the file name, rather than only
    when there's no collection subnested.
    """

    def __init__(self, species: Dict):
        super().__init__(species)
        # Always capitalize, regardless of collection nesting.
        self.filename_species = species["name"].capitalize()
class EnsemblBacteriaUrlBuilder(EnsemblUrlBuilder):
    """The EnsemblBacteria URLs are extra tricky because they have an
    extra layer in them.

    A special species-detail file has to be parsed to find out which
    collection the species belongs to.
    """

    def __init__(self, species: Dict):
        super().__init__(species)
        species_detail = get_species_detail_by_assembly(self.assembly, self.division)
        if not species_detail:
            return

        self.species_sub_dir = species_detail["species"]
        self.filename_species = species_detail["species"].capitalize()
        match = re.match(r"bacteria_.+_collection", species_detail["core_db"])
        if match:
            # Append a "/" here because the collection segment isn't present
            # in all routes; keeping the slash out of the URL template avoids
            # a "//" in the path when collection is blank.
            self.collection = match.group(0) + "/"
class EnsemblFungiUrlBuilder(EnsemblUrlBuilder):
    """The EnsemblFungi URLs work similarly to EnsemblBacteria.

    EnsemblFungi is special because there is an assembly_name TIGR
    which needs to be corrected to CADRE for some reason.
    """

    def __init__(self, species: Dict):
        super().__init__(species)
        # The corrected assembly must be in place before the detail lookup.
        if self.assembly == "TIGR":
            self.assembly = "CADRE"

        species_detail = get_species_detail_by_assembly(self.assembly, self.division)
        if not species_detail:
            return

        self.species_sub_dir = species_detail["species"]
        self.filename_species = species_detail["species"].capitalize()
        match = re.match(r"fungi_.+_collection", species_detail["core_db"])
        if match:
            # Append a "/" here because the collection segment isn't present
            # in all routes; keeping the slash out of the URL template avoids
            # a "//" in the path when collection is blank.
            self.collection = match.group(0) + "/"
def ensembl_url_builder_factory(species: Dict) -> EnsemblUrlBuilder:
    """Returns an instance of EnsemblUrlBuilder or one of its subclasses.

    The class of the returned object is chosen based on the species'
    division; unknown divisions get the generic base builder.
    """
    builders_by_division = {
        "EnsemblProtists": EnsemblProtistsUrlBuilder,
        "EnsemblFungi": EnsemblFungiUrlBuilder,
        "EnsemblVertebrates": MainEnsemblUrlBuilder,
        "EnsemblBacteria": EnsemblBacteriaUrlBuilder,
    }
    builder_class = builders_by_division.get(species["division"], EnsemblUrlBuilder)
    return builder_class(species)
class TranscriptomeIndexSurveyor(ExternalSourceSurveyor):
    """Surveys an Ensembl division for species transcriptome files.

    Unlike other surveyors, this one discovers species (not experiments
    and samples) and queues one downloader job per species for its
    transcriptome (FASTA) and annotation (GTF) files.
    """

    def source_type(self):
        """Return the downloader type for the files this surveyor finds."""
        return Downloaders.TRANSCRIPTOME_INDEX.value

    def _clean_metadata(self, species: Dict) -> Dict:
        """Removes fields from metadata which shouldn't be stored.

        Also cast any non-None values to str so they can be stored in
        the database; None values are dropped entirely.

        These fields shouldn't be stored because:
        The taxonomy id is stored as fields on the Organism.
        Aliases and groups are lists we don't need.
        """
        # dict.pop with a default is a no-op for absent keys, so no
        # membership checks are needed.
        species.pop("taxon_id", None)
        species.pop("taxonomy_id", None)
        species.pop("aliases", None)
        species.pop("groups", None)
        # Cast to List since we're modifying the size of the dict
        # while iterating over it.
        for key, value in list(species.items()):
            if value is None:
                species.pop(key)
            else:
                species[key] = str(value)
        return species

    def _generate_files(self, species: Dict) -> None:
        """Create OriginalFile records for the species' FASTA and GTF files.

        Returns the list of the two newly created OriginalFiles.
        """
        url_builder = ensembl_url_builder_factory(species)
        fasta_download_url = url_builder.build_transcriptome_url()
        gtf_download_url = url_builder.build_gtf_url()
        # "division" must be popped before _clean_metadata so it isn't
        # stored along with the rest of the species metadata.
        species.pop("division")
        self._clean_metadata(species)
        all_new_files = []
        fasta_filename = url_builder.filename_species + ".fa.gz"
        original_file = OriginalFile()
        original_file.source_filename = fasta_filename
        original_file.source_url = fasta_download_url
        original_file.is_archive = True
        original_file.is_downloaded = False
        original_file.save()
        all_new_files.append(original_file)
        gtf_filename = url_builder.filename_species + ".gtf.gz"
        original_file = OriginalFile()
        original_file.source_filename = gtf_filename
        original_file.source_url = gtf_download_url
        original_file.is_archive = True
        original_file.is_downloaded = False
        original_file.save()
        all_new_files.append(original_file)
        return all_new_files

    def survey(self, source_type=None) -> bool:
        """Discover species and queue downloader jobs for their files.

        Surveying here is a bit different than discovering an experiment
        and samples. Returns True on success, False on any failure.
        """
        if source_type != "TRANSCRIPTOME_INDEX":
            return False
        try:
            species_files = self.discover_species()
        except Exception:
            logger.exception(
                "Exception caught while discovering species. Terminating survey job.",
                survey_job=self.survey_job.id,
            )
            return False
        try:
            for specie_file_list in species_files:
                self.queue_downloader_job_for_original_files(
                    specie_file_list, is_transcriptome=True
                )
        except Exception:
            logger.exception(
                "Failed to queue downloader jobs. Terminating survey job.",
                survey_job=self.survey_job.id,
            )
            return False
        return True

    def discover_species(self):
        """Return a list of OriginalFile lists, one per discovered species.

        Reads the division (and optionally an organism name) from the
        survey job's key-values, queries the appropriate Ensembl REST
        API, and generates files for each matching species. For the
        Fungi and Bacteria divisions an organism name with a configured
        strain mapping is required.
        """
        ensembl_division = SurveyJobKeyValue.objects.get(
            survey_job_id=self.survey_job.id, key__exact="ensembl_division"
        ).value
        logger.info(
            "Surveying %s division of ensembl.", ensembl_division, survey_job=self.survey_job.id,
        )
        try:
            organism_name = SurveyJobKeyValue.objects.get(
                survey_job_id=self.survey_job.id, key__exact="organism_name"
            ).value
            organism_name = organism_name.lower().replace(" ", "_")
        except SurveyJobKeyValue.DoesNotExist:
            organism_name = None
        if ensembl_division in ["EnsemblFungi", "EnsemblBacteria"]:
            if organism_name is None:
                logger.error(
                    "Organism name must be specified for Fungi and Bacteria divisions.",
                    ensembl_division=ensembl_division,
                    organism_name=organism_name,
                )
                return []
            else:
                if get_strain_mapping_for_organism(organism_name) is None:
                    logger.error(
                        (
                            "Organism name must be listed in config/organism_strain_"
                            "mappings.csv for Fungi and Bacteria divisions."
                        ),
                        ensembl_division=ensembl_division,
                        organism_name=organism_name,
                    )
                    return []
        # The main division has a different base URL for its REST API.
        if ensembl_division == "Ensembl":
            r = utils.requests_retry_session().get(MAIN_DIVISION_URL_TEMPLATE)
            # Yes I'm aware that specieses isn't a word. However I need to
            # distinguish between a singlular species and multiple species.
            specieses = r.json()["species"]
        else:
            formatted_division_url = DIVISION_URL_TEMPLATE.format(division=ensembl_division)
            r = utils.requests_retry_session().get(formatted_division_url)
            specieses = r.json()
        all_new_species = []
        if organism_name:
            for species in specieses:
                # This key varies based on whether the division is the
                # main one or not... why couldn't they just make them
                # consistent?
                if ("species" in species and organism_name in species["species"]) or (
                    "name" in species and organism_name in species["name"]
                ):
                    # Fungi have a strain identifier in their
                    # names. This is different than everything else,
                    # so we're going to handle this special case by
                    # just overwriting this. This is okay because we
                    # just have to discover one species for the
                    # organism, and then our strain mapping will make
                    # sure we use the correct strain and assembly.
                    if ensembl_division == "EnsemblFungi" and organism_name != species["name"]:
                        species["name"] = organism_name
                    all_new_species.append(self._generate_files(species))
                    break
        else:
            for species in specieses:
                all_new_species.append(self._generate_files(species))
        if len(all_new_species) == 0:
            logger.error(
                "Unable to find any species!",
                ensembl_division=ensembl_division,
                organism_name=organism_name,
            )
        return all_new_species
<|code_end|>
workers/data_refinery_workers/processors/utils.py
<|code_start|>import os
import random
import shutil
import signal
import string
import subprocess
import sys
import yaml
import pickle
from django.conf import settings
from django.utils import timezone
from typing import List, Dict, Callable
from data_refinery_common.job_lookup import ProcessorEnum, ProcessorPipeline, SMASHER_JOB_TYPES
from data_refinery_common.job_management import create_downloader_job
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
Dataset,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
OriginalFile,
OriginalFileSampleAssociation,
Pipeline,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
Sample,
)
from data_refinery_common.utils import (
get_env_variable,
get_env_variable_gracefully,
get_instance_id,
)
logger = get_and_configure_logger(__name__)
# Let this fail if SYSTEM_VERSION is unset.
SYSTEM_VERSION = get_env_variable("SYSTEM_VERSION")
# Destination bucket for most computed files synced by end_job.
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
# QN reference files go to a separate bucket so they can be publicly available.
S3_QN_TARGET_BUCKET_NAME = get_env_variable("S3_QN_TARGET_BUCKET_NAME", "data-refinery")
# Absolute directory of this module; used to resolve yml files and checksums.
DIRNAME = os.path.dirname(os.path.abspath(__file__))
# The ProcessorJob currently being run; set by start_job, read by signal_handler.
CURRENT_JOB = None
def signal_handler(sig, frame):
    """Signal Handler, works for both SIGTERM and SIGINT.

    If a job is in flight, marks it unsuccessful, records why it was
    stopped, and gives back the retry it would otherwise consume, then
    exits the process cleanly.
    """
    global CURRENT_JOB
    if CURRENT_JOB:
        CURRENT_JOB.success = False
        CURRENT_JOB.end_time = timezone.now()
        # Being interrupted isn't the job's fault, so don't burn a retry.
        CURRENT_JOB.num_retries = CURRENT_JOB.num_retries - 1
        # Fixed typo: "Interruped" -> "Interrupted".
        CURRENT_JOB.failure_reason = "Interrupted by SIGTERM/SIGINT: " + str(sig)
        CURRENT_JOB.save()
    sys.exit(0)
def prepare_original_files(job_context):
    """ Provision in the Job context for OriginalFile-driven processors.

    Loads the job's OriginalFiles and their Samples into the context,
    setting "original_files", "samples" and "computed_files". If any
    file is missing from disk, recreates downloader jobs for the
    missing files and aborts this processor job.

    Raises ProcessorJobError when the job has no files, when downloader
    jobs can't be recreated, or (with abort=True) when files need
    re-downloading.
    """
    job = job_context["job"]
    original_files = job.original_files.all()
    if original_files.count() == 0:
        raise ProcessorJobError("No files were found for the job.", success=False)
    undownloaded_files = set()
    for original_file in original_files:
        if original_file.needs_downloading(job_context["job_id"]):
            if original_file.is_downloaded:
                # If it needs to be downloaded then it's not
                # downloaded and the is_downloaded field should stop
                # lying about that.
                original_file.is_downloaded = False
                original_file.save()
            undownloaded_files.add(original_file)
    if undownloaded_files:
        logger.info(
            (
                "One or more files found which were missing or not downloaded."
                " Creating downloader jobs for them and deleting this job."
            ),
            processor_job=job.id,
            missing_files=list(undownloaded_files),
        )
        # force=True: recreate the downloader job even if one already ran.
        was_job_created = create_downloader_job(
            undownloaded_files, processor_job_id=job_context["job_id"], force=True
        )
        if not was_job_created:
            raise ProcessorJobError(
                "Missing file for processor job but unable to recreate downloader jobs!",
                success=False,
            )
        raise ProcessorJobError(
            "We can not process the data because it is not on the disk",
            success=False,
            no_retry=True,  # this job should not be retried again
            abort=True,  # abort the job and don't do anything else
            undownloaded_files=[file.id for file in undownloaded_files],
        )
    job_context["original_files"] = original_files
    # Samples are looked up via the first original file only; assumes all
    # of the job's files share the same samples -- TODO confirm.
    first_original_file = original_files.first()
    samples = Sample.objects.filter(original_files=first_original_file)
    job_context["samples"] = samples
    job_context["computed_files"] = []
    return job_context
def prepare_dataset(job_context):
    """ Provision in the Job context for Dataset-driven processors.

    Marks the job's single Dataset as processing and loads it, along
    with its samples and experiments, into the context. Raises
    ProcessorJobError when the job has zero or multiple datasets.
    """
    job = job_context["job"]
    job_datasets = job.datasets.all()

    dataset_count = job_datasets.count()
    # A processor job must be associated with exactly one dataset.
    if dataset_count == 0:
        raise ProcessorJobError(
            "No datasets found for processor job!", success=False, no_retry=True
        )
    if dataset_count > 1:
        raise ProcessorJobError(
            "More than one dataset for processor job!", success=False, no_retry=True
        )

    dataset = job_datasets.first()
    dataset.is_processing = True
    dataset.save()

    # Get the samples to smash.
    job_context["dataset"] = dataset
    job_context["samples"] = dataset.get_aggregated_samples()
    job_context["experiments"] = dataset.get_experiments()
    # Downstream code expects these keys even for dataset-driven jobs.
    job_context["original_files"] = []
    job_context["computed_files"] = []
    return job_context
def start_job(job_context: Dict):
    """A processor function to start jobs.

    Records in the database that this job is being started, installs
    SIGTERM/SIGINT handlers, and loads the job's inputs into the
    context: OriginalFiles for most pipelines, a Dataset for smasher-type
    pipelines, and nothing for Janitor/Tximport jobs. Sets "abort" in
    the context (to be handled by end_job) when the work is already done.
    """
    job = job_context["job"]
    original_file = job.original_files.first()
    # If the sample already has a good computed file there is nothing to do
    # (except for tximport jobs, which only use the file to find their
    # experiment).
    if (
        not job.pipeline_applied == ProcessorPipeline.TXIMPORT.value
        and original_file
        and not original_file.needs_processing(job_context["job_id"])
    ):
        failure_reason = (
            "Sample has a good computed file, it must have been processed, "
            "so it doesn't need to be downloaded! Aborting!"
        )
        logger.error(failure_reason, job_id=job.id, original_file=original_file)
        job_context["original_files"] = []
        job_context["computed_files"] = []
        job_context["abort"] = True
        # Will be saved by end_job.
        job_context["job"].failure_reason = failure_reason
        return job_context
    # Set up the SIGTERM handler so we can appropriately handle being interrupted.
    # (`docker stop` uses SIGTERM, not SIGINT.)
    # (however, Nomad sends an SIGINT so catch both.)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    # This job should not have been started, for some reason Nomad restarts some of our jobs
    # https://github.com/AlexsLemonade/refinebio/issues/1487
    if job.start_time is not None and settings.RUNNING_IN_CLOUD:
        # Let's just log the event and let the job run instead of failing
        # and also reset the endtime and failure reason, since those fields might have been set
        logger.warn(
            "ProcessorJob was restarted by Nomad. We do not know why this happened",
            processor_job=job.id,
            success=job.success,
            failure_reason=job.failure_reason,
            start_time=job.start_time,
            end_time=job.end_time,
        )
        job.end_time = None
        job.failure_reason = None
    job.worker_id = get_instance_id()
    job.worker_version = SYSTEM_VERSION
    job.start_time = timezone.now()
    job.save()
    # Remember the running job so signal_handler can mark it interrupted.
    global CURRENT_JOB
    CURRENT_JOB = job
    logger.debug("Starting processor Job.", processor_job=job.id, pipeline=job.pipeline_applied)
    # Janitor jobs don't operate on file objects.
    # Tximport jobs don't need to download the original file, they
    # just need it to know what experiment to process.
    if job.pipeline_applied not in [
        ProcessorPipeline.JANITOR.value,
        ProcessorPipeline.TXIMPORT.value,
    ]:
        # Some jobs take OriginalFiles, other take Datasets
        if ProcessorPipeline[job.pipeline_applied] not in SMASHER_JOB_TYPES:
            job_context = prepare_original_files(job_context)
            if not job_context.get("success", True):
                return job_context
        else:
            job_context = prepare_dataset(job_context)
            if not job_context.get("success", True):
                return job_context
    else:
        # Just in case
        job_context["original_files"] = []
        job_context["computed_files"] = []
    return job_context
def end_job(job_context: Dict, abort=False):
    """A processor function to end jobs.

    Record in the database that this job has completed and that
    the samples have been processed if not aborted. Also uploads
    computed files to S3, cleans up local files, saves the pipeline,
    and logs the outcome. Returns the final job context so tests can
    inspect it.
    """
    job = job_context["job"]
    if "success" in job_context:
        success = job_context["success"]
    else:
        success = True
    # Upload first so if this fails we can set success = False and let
    # the rest of the function mark it as failed.
    if success:
        # QN reference files go to a special bucket so they can be
        # publicly available.
        if job_context["job"].pipeline_applied == "QN_REFERENCE":
            s3_bucket = S3_QN_TARGET_BUCKET_NAME
        else:
            s3_bucket = S3_BUCKET_NAME
        # S3-sync Computed Files
        for computed_file in job_context.get("computed_files", []):
            # Ensure even distribution across S3 servers
            nonce = "".join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(24)
            )
            result = computed_file.sync_to_s3(s3_bucket, nonce + "_" + computed_file.filename)
            if result and settings.RUNNING_IN_CLOUD:
                computed_file.delete_local_file()
            elif not result:
                success = False
                job_context["success"] = False
                job.failure_reason = "Failed to upload computed file."
                break
    if not success:
        # A failed job's computed files are not kept, locally or in the DB.
        for computed_file in job_context.get("computed_files", []):
            computed_file.delete_local_file()
            if computed_file.id:
                computed_file.delete()
    if not abort:
        # Smasher/compendia/QN/janitor jobs don't correspond to individual
        # samples, so only the remaining pipelines mark samples processed.
        if job_context.get("success", False) and not (
            job_context["job"].pipeline_applied
            in [
                ProcessorPipeline.SMASHER.value,
                ProcessorPipeline.QN_REFERENCE.value,
                ProcessorPipeline.CREATE_COMPENDIA.value,
                ProcessorPipeline.CREATE_QUANTPENDIA.value,
                ProcessorPipeline.JANITOR.value,
            ]
        ):
            # Salmon requires the final `tximport` step to be fully `is_processed`.
            mark_as_processed = True
            if job_context["job"].pipeline_applied == "SALMON" and not job_context.get(
                "tximported", False
            ):
                mark_as_processed = False
            if mark_as_processed:
                # This handles most of our cases
                unique_experiments = []
                for sample in job_context.get("samples", []):
                    sample.is_processed = True
                    sample.save()
                    if sample.experiments.all().count() > 0:
                        unique_experiments = list(
                            set(unique_experiments + sample.experiments.all()[::1])
                        )
                # Explicitly for the single-salmon scenario
                if "sample" in job_context:
                    sample = job_context["sample"]
                    sample.is_processed = True
                    sample.save()
                for experiment in unique_experiments:
                    experiment.update_num_samples()
    # If we are aborting, it's because we want to do something
    # different, so leave the original files so that "something
    # different" can use them.
    if (success or job.no_retry) and not abort:
        # Cleanup Original Files
        if "original_files" in job_context:
            for original_file in job_context["original_files"]:
                if original_file.needs_processing(job.id):
                    original_file.delete_local_file()
    # If the pipeline includes any steps, save it.
    if "pipeline" in job_context:
        pipeline = job_context["pipeline"]
        if len(pipeline.steps):
            pipeline.save()
    # Compendia jobs keep their work_dir; everything else is cleaned up
    # when running in the cloud.
    if (
        "work_dir" in job_context
        and job_context["job"].pipeline_applied != ProcessorPipeline.CREATE_COMPENDIA.value
        and settings.RUNNING_IN_CLOUD
    ):
        shutil.rmtree(job_context["work_dir"], ignore_errors=True)
    job.abort = abort
    job.success = success
    job.end_time = timezone.now()
    job.save()
    if success:
        logger.debug(
            "Processor job completed successfully.",
            processor_job=job.id,
            pipeline_applied=job.pipeline_applied,
        )
    else:
        if not job.failure_reason:
            logger.error(
                "Processor job failed without having failure_reason set. FIX ME!!!!!!!!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
            )
        else:
            logger.error(
                "Processor job failed!",
                processor_job=job.id,
                pipeline_applied=job.pipeline_applied,
                no_retry=job.no_retry,
                failure_reason=job.failure_reason,
            )
    # Return Final Job context so testers can check it
    return job_context
def run_pipeline(start_value: Dict, pipeline: List[Callable]):
    """Runs a pipeline of processor functions.

    start_value must contain a key 'job_id' which is a valid id for a
    ProcessorJob record.

    Each processor function must accept a dictionary and return a
    dictionary. Any processor function which returns a dictionary
    containing a key of 'success' with a value of False will cause the
    pipeline to terminate with a call to utils.end_job.

    The key 'job' is reserved for the ProcessorJob currently being
    run. It is required that the dictionary returned by each
    processor function preserve the mapping for 'job' that was passed
    into it.
    """
    job_id = start_value["job_id"]
    try:
        job = ProcessorJob.objects.get(id=job_id)
    except ProcessorJob.DoesNotExist:
        logger.error("Cannot find processor job record.", processor_job=job_id)
        return
    if len(pipeline) == 0:
        # Fixed misspelled log kwarg: "procesor_job" -> "processor_job".
        logger.error("Empty pipeline specified.", processor_job=job_id)
    last_result = start_value
    last_result["job"] = job
    for processor in pipeline:
        try:
            last_result = processor(last_result)
        except ProcessorJobError as e:
            # ProcessorJobError carries flags that must be copied onto the
            # job record before the job is ended.
            e.update_job(job)
            logger.exception(e.failure_reason, processor_job=job.id, **e.context)
            if e.success is False:
                # end_job will use this and set the value
                last_result["success"] = False
            return end_job(last_result, abort=bool(e.abort))
        except Exception as e:
            failure_reason = (
                "Unhandled exception caught while running processor" " function {} in pipeline: "
            ).format(processor.__name__)
            logger.exception(failure_reason, no_retry=job.no_retry, processor_job=job_id)
            last_result["success"] = False
            last_result["job"].failure_reason = failure_reason + str(e)
            return end_job(last_result)
        if "success" in last_result and last_result["success"] is False:
            logger.error(
                "Processor function %s failed. Terminating pipeline.",
                processor.__name__,
                processor_job=job_id,
                failure_reason=last_result["job"].failure_reason,
            )
            return end_job(last_result)
        if last_result.get("abort", False):
            return end_job(last_result, abort=True)
    return last_result
class ProcessorJobError(Exception):
    """ General processor job error class.

    Carries optional flags (success, no_retry, retried, abort) that are
    copied onto the ProcessorJob record by update_job, plus arbitrary
    keyword context that is included when the failure is logged.
    """

    def __init__(
        self, failure_reason, *, success=None, no_retry=None, retried=None, abort=None, **context
    ):
        super().__init__(failure_reason)
        self.failure_reason = failure_reason
        self.success = success
        self.no_retry = no_retry
        self.retried = retried
        self.abort = abort
        # additional context to be included when logging
        self.context = context

    def update_job(self, job):
        """Copy this error's flags and failure reason onto `job` and save."""
        job.failure_reason = self.failure_reason
        # Only flags that were explicitly provided overwrite the job's state.
        for flag in ("success", "no_retry", "retried", "abort"):
            value = getattr(self, flag)
            if value is not None:
                setattr(job, flag, value)
        job.save()
        # also update the failure reason if this is a dataset's processor job
        for dataset in job.datasets.all():
            dataset.failure_reason = self.failure_reason
            dataset.success = False
            dataset.save()
def get_os_distro():
    """Return the OS distribution string read from /etc/issue.

    Since we are using Docker, only Linux distributions are considered;
    "/etc/issue" doesn't exist on Mac OS X (where "sw_vers" would be
    needed). Alternative files on Linux are /etc/os-release and
    /etc/lsb-release, and Python's "platform" module would be the
    cross-platform solution.
    """
    with open("/etc/issue") as issue_file:
        first_line = issue_file.readline()
    # Trim the getty escape (e.g. "\l"), newlines and padding spaces.
    return first_line.strip("\l\n\\n ")
def get_os_pkgs(pkg_list):
    """Return a dict mapping each OS-level package name in pkg_list to
    its installed version.

    Assumes a Debian-based package manager (dpkg/apt). Raises Exception
    when a package is not installed.
    """
    pkg_info = dict()
    for pkg in pkg_list:
        query = subprocess.run(
            ["dpkg-query", "--show", pkg], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if query.returncode:
            raise Exception(
                "OS-level package %s not found: %s" % (pkg, query.stderr.decode().strip())
            )
        # dpkg-query --show prints "<name>\t<version>"; keep the last field.
        pkg_info[pkg] = query.stdout.decode().strip().split("\t")[-1]
    return pkg_info
def get_cmd_lines(cmd_list):
    """Return a dict mapping each command string in cmd_list to the
    command's stripped output.

    Raises Exception when a command exits with a non-zero status.
    """
    cmd_info = dict()
    for cmd in cmd_list:
        tokens = cmd.split()
        completed = subprocess.run(tokens, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if completed.returncode:
            raise Exception(
                "Failed to run command line '%s': %s" % (cmd, completed.stderr.decode().strip())
            )
        # Workaround: "salmontools --version" writes its output to stderr
        # instead of stdout. We keep stdout and stderr separate (rather than
        # using stderr=subprocess.STDOUT) and just read stderr for that tool.
        output_bytes = completed.stderr if tokens[0] == "salmontools" else completed.stdout
        cmd_info[cmd] = output_bytes.decode().strip()
    return cmd_info
def get_pip_pkgs(pkg_list):
    """Returns a dictionary in which each key is the name of a pip-installed
    package and the corresponding value is the package's version.

    Instead of using `pip show pkg | grep Version | awk '{print $2}'` per
    package, the output of `pip freeze` is captured once and each input
    package is looked up in it, launching the subprocess only a single
    time.

    Raises Exception when `pip freeze` fails or a requested package is
    not installed.
    """
    process_done = subprocess.run(["pip", "freeze"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process_done.returncode:
        raise Exception("'pip freeze' failed: %s" % process_done.stderr.decode().strip())
    frozen_pkgs = dict()
    for item in process_done.stdout.decode().split():
        # Editable/VCS/local installs appear as "pkg @ <url>" rather than
        # "pkg==version"; partition() never raises, so such tokens are simply
        # skipped instead of crashing with a ValueError like split() would.
        name, separator, version = item.partition("==")
        if separator:
            frozen_pkgs[name] = version
    pkg_info = dict()
    for pkg in pkg_list:
        try:
            version = frozen_pkgs[pkg]
        except KeyError:
            raise Exception("Pip package not found: %s" % pkg)
        pkg_info[pkg] = version
    return pkg_info
def get_bioc_version():
    """Return the version string of the "Bioconductor" package in R.

    The data frame returned by installed.packages() does NOT include a
    package named "Bioconductor", so a dedicated R command is used to
    look up the associated version. Raises Exception when the command
    fails or reports an empty version.
    """
    r_command = "tools:::.BioC_version_associated_with_R_version()"
    completed = subprocess.run(
        ["Rscript", "-e", r_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if completed.returncode:
        raise Exception(
            "R command failed to retrieve Bioconductor version: %s"
            % completed.stderr.decode().strip()
        )
    # The last token is the quoted version; drop the surrounding quotes.
    quoted_version = completed.stdout.decode().strip().split()[-1]
    version = quoted_version[1:-1]
    if not version:
        raise Exception("Bioconductor not found")
    return version
def get_r_pkgs(pkg_list):
    """Returns a dictionary in which each key is the name of a R package
    and the corresponding value is the package's version.

    Raises Exception when the R invocation fails or a requested package
    is not installed.
    """
    # Use "Rscript -e <R_commands>" command to get all user-installed R packages.
    r_commands = "packages.df <- as.data.frame(installed.packages()[, c(1, 3:4)]); \
    packages.df <- packages.df[is.na(packages.df$Priority), 1:2, drop=FALSE]; \
    colnames(packages.df) <- NULL; \
    print(packages.df, row.names=FALSE);"
    process_done = subprocess.run(
        ["Rscript", "-e", r_commands], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if process_done.returncode:
        raise Exception(
            "R command failed to retrieves installed packages: %s"
            % process_done.stderr.decode().strip()
        )
    # Each output line is "<name> <version>"; parse into a lookup table.
    r_pkgs = dict()
    for item in process_done.stdout.decode().strip().split("\n"):
        name, version = item.strip().split()
        r_pkgs[name] = version
    # "Brainarray" is a collection that consists of 121 ".*ensgprobe" packages.
    # They share the same version number, so we use 'hgu133plus2hsensgprobe'
    # package to report this uniform version.
    ba_proxy_pkg = "hgu133plus2hsensgprobe"
    pkg_info = dict()
    for pkg in pkg_list:
        if pkg == "Bioconductor":
            # Bioconductor isn't in installed.packages(); see get_bioc_version().
            version = get_bioc_version()
        else:
            try:
                version = r_pkgs[pkg] if pkg != "Brainarray" else r_pkgs[ba_proxy_pkg]
            except KeyError:
                raise Exception("R package not found: %s" % pkg)
        pkg_info[pkg] = version
    return pkg_info
def get_checksums(filenames_list):
    """Returns a dictionary in which each key is a file's name and the
    corresponding value is the file's md5 checksum.

    Paths are resolved relative to this module's directory (DIRNAME).
    Uses hashlib instead of shelling out to the external `md5sum`
    binary; the hex digest produced is identical.
    """
    import hashlib

    checksums = dict()
    for filename in filenames_list:
        abs_filepath = os.path.join(DIRNAME, filename)
        hasher = hashlib.md5()
        try:
            with open(abs_filepath, "rb") as checked_file:
                # Read in 1 MiB chunks so large files don't blow up memory.
                for chunk in iter(lambda: checked_file.read(1 << 20), b""):
                    hasher.update(chunk)
        except OSError as e:
            raise Exception("Failed to compute md5 checksum for %s: %s" % (filename, e))
        checksums[filename] = hasher.hexdigest()
    return checksums
def get_runtime_env(yml_filename):
    """Reads input YAML filename and returns a dictionary in which each key
    is a category name of runtime environment and the corresponding value
    is an object that includes version information of packages listed in
    that category.

    Raises Exception when an unknown category is encountered.
    """
    runtime_env = dict()
    with open(yml_filename) as yml_fh:
        # safe_load: the file only contains lists of package names, and
        # plain yaml.load() without a Loader is deprecated (and can execute
        # arbitrary constructors) in PyYAML >= 5.1.
        pkgs = yaml.safe_load(yml_fh)
        for pkg_type, pkg_list in pkgs.items():
            if pkg_type == "os_distribution":
                value = get_os_distro()
            elif pkg_type == "os_pkg":
                value = get_os_pkgs(pkg_list)
            elif pkg_type == "cmd_line":
                value = get_cmd_lines(pkg_list)
            elif pkg_type == "python":
                value = get_pip_pkgs(pkg_list)
            elif pkg_type == "R":
                value = get_r_pkgs(pkg_list)
            elif pkg_type == "checksum":
                value = get_checksums(pkg_list)
            else:
                raise Exception("Unknown category in %s: %s" % (yml_filename, pkg_type))
            runtime_env[pkg_type] = value
    return runtime_env
def find_processor(enum_key):
    """Returns either a newly created Processor record, or the one in the
    database that matches the current processor name, version, docker
    image and runtime environment.
    """
    processor_info = ProcessorEnum[enum_key].value
    # In current implementation, ALWAYS get the runtime environment.
    yml_path = os.path.join(DIRNAME, processor_info["yml_file"])
    environment = get_runtime_env(yml_path)
    processor, _created = Processor.objects.get_or_create(
        name=processor_info["name"],
        version=SYSTEM_VERSION,
        docker_image=processor_info["docker_img"],
        environment=environment,
    )
    return processor
def handle_processor_exception(job_context, processor_key, ex):
    """Record a processor-setup failure on the job and mark the context
    as failed, returning the updated context."""
    error_message = "Failed to set processor: %s" % ex
    logger.error(error_message, job_id=job_context["job"].id, processor=processor_key)
    job_context["job"].failure_reason = error_message
    job_context["success"] = False
    return job_context
def cache_keys(*keys, work_dir_key="work_dir"):
    """ Decorator to be applied to a pipeline function.

    Returns a new function that calls the original one and caches the given
    keys into the `work_dir`. On the next call it will load those keys (if
    they exist) and add them to the job_context instead of executing the
    function.

    Fixes over the previous version: bare `except:` clauses are narrowed to
    `except Exception:` (so KeyboardInterrupt/SystemExit propagate), pickle
    file handles are closed via context managers, and functools.wraps keeps
    the wrapped function's name (which run_pipeline uses in log messages).
    """
    import functools

    def inner(func):
        # generate a unique name for the cache based on the pipeline name
        # and the cached keys
        cache_name = "__".join(list(keys) + [func.__name__])

        @functools.wraps(func)
        def pipeline(job_context):
            cache_path = os.path.join(job_context[work_dir_key], cache_name)
            if os.path.exists(cache_path):
                # cached values exist, load keys from cache
                try:
                    with open(cache_path, "rb") as cache_file:
                        values = pickle.load(cache_file)
                    return {**job_context, **values}
                except Exception:
                    # don't fail if we can't load the cache
                    logger.warning(
                        "Failed to load cached data for pipeline function.",
                        function_name=func.__name__,
                        keys=keys,
                    )
            # execute the actual function
            job_context = func(job_context)
            try:
                # save cached data for the next run
                values = {key: job_context[key] for key in keys}
                with open(cache_path, "wb") as cache_file:
                    pickle.dump(values, cache_file)
            except Exception:
                # don't fail if we can't save the cache
                logger.warning(
                    "Failed to cache data for pipeline function.",
                    function_name=func.__name__,
                    keys=keys,
                )
            return job_context

        return pipeline

    return inner
<|code_end|>
|
Engagement bot has gone away
### Problem or idea
The engagement bot hasn't posted any updates since Dec 27.
### Solution or next step
Investigate why the engagement bot stopped posting on Dec 27 and fix it so its reports resume.
| api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField, OuterRef, Subquery
from django.db.models.functions import Trunc, Left
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import (
Http404,
JsonResponse,
)
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_RANGE,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend,
)
from django_filters.rest_framework import DjangoFilterBackend
from elasticsearch_dsl import TermsFacet
from rest_framework import status, filters, generics
from rest_framework.exceptions import APIException, NotFound
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
CompendiumResultSerializer,
CompendiumResultWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
OriginalFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import ExperimentDocument
from data_refinery_common.utils import get_active_volumes, get_nomad_jobs_breakdown, get_nomad_jobs
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
##
# ElasticSearch
##
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
logger = get_and_configure_logger(__name__)
##
# Variables
##
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    def aggregate(self, request, queryset, view):
        """Extends FacetedSearchFilterBackend to add additional metrics to each bucket
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19
        We have the downloadable sample accession codes indexed for each experiment.
        The cardinality metric, returns the number of unique samples for each bucket.
        However it's just an approximate
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        I used the highest possible precision threshold, but this might increase the amount
        of memory used.
        """
        facets = self.construct_facets(request, view)
        for field_name, facet_config in facets.items():
            aggregation = facet_config["facet"].get_aggregation()
            # Attach a cardinality metric to every facet bucket so each one
            # also reports its (approximate) unique downloadable-sample count.
            bucket = queryset.aggs.bucket(field_name, aggregation)
            bucket.metric(
                "total_samples",
                "cardinality",
                field="downloadable_samples",
                precision_threshold=40000,
            )
        return queryset
##
# ElasticSearch powered Search and Filter
##
@method_decorator(
    name="list",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="technology",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
            ),
            openapi.Parameter(
                name="has_publication",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Filter the results that have associated publications with `?has_publication=true`",
            ),
            openapi.Parameter(
                name="platform",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by platform, this parameter can have multiple values.",
            ),
            openapi.Parameter(
                name="organism",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by organism, this parameter can have multiple values.",
            ),
            openapi.Parameter(
                name="num_processed_samples",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_NUMBER,
                description="Use ElasticSearch queries to specify the number of processed samples of the results",
            ),
        ],
        operation_description="""
Use this endpoint to search among the experiments.
This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)
There's an additional field in the response named `facets` that contain stats on the number of results per filter type.
Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
""",
    ),
)
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """

    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended,
    ]

    # Primitive
    lookup_field = "id"

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        "title": {"boost": 10},
        "publication_authors": {"boost": 8},  # "People will search themselves"
        "publication_title": {"boost": 5},
        "submitter_institution": {"boost": 3},
        "description": {"boost": 2},
        "accession_code": None,
        "alternate_accession_code": None,
        "publication_doi": None,
        "pubmed_id": None,
        "sample_metadata_fields": None,
        "platform_names": None,
    }

    # Define filtering fields
    filter_fields = {
        "id": {"field": "_id", "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN],},
        "technology": "technology",
        "has_publication": "has_publication",
        "platform": "platform_accession_codes",
        "organism": "organism_names",
        "num_processed_samples": {
            "field": "num_processed_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
        "num_downloadable_samples": {
            "field": "num_downloadable_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
    }

    # Define ordering fields
    ordering_fields = {
        "id": "id",
        "title": "title.raw",
        "description": "description.raw",
        "num_total_samples": "num_total_samples",
        "num_downloadable_samples": "num_downloadable_samples",
        "source_first_published": "source_first_published",
    }

    # Specify default ordering
    ordering = (
        "_score",
        "-num_total_samples",
        "id",
        "title",
        "description",
        "-source_first_published",
    )

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        "technology": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": True,  # These are enabled by default, which is more expensive but more simple.
        },
        "organism_names": {
            "field": "organism_names",
            "facet": TermsFacet,
            "enabled": True,
            "options": {"size": 999999},
        },
        "platform_accession_codes": {
            "field": "platform_accession_codes",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
            "options": {"size": 999999},
        },
        "has_publication": {
            "field": "has_publication",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        "technology_global": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
        "organism_names_global": {
            "field": "organism_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        "platform_names_global": {
            "field": "platform_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        "has_publication_global": {
            # BUG FIX: this facet previously pointed at "platform_names",
            # so requesting it returned platform buckets under the wrong name.
            "field": "has_publication",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
    }
    faceted_search_param = "facet"

    def list(self, request, *args, **kwargs):
        """List search results and rewrite the raw ES facets into per-filter counts."""
        # BUG FIX: `args`/`kwargs` were previously passed positionally
        # (`.list(request, args, kwargs)`), forwarding the tuple and dict
        # objects themselves instead of unpacking them.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data["facets"] = self.transform_es_facets(response.data["facets"])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:
        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }
        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet["buckets"]:
                # boolean facets expose their key as a string ("true"/"false")
                if field == "has_publication":
                    filter_group[bucket["key_as_string"]] = bucket["total_samples"]["value"]
                else:
                    filter_group[bucket["key"]] = bucket["total_samples"]["value"]
            result[field] = filter_group
        return result
##
# Dataset
##
class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Datasets. """

    # Plain DRF create endpoint: validation and creation are delegated
    # entirely to the serializer.
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        operation_description="View a single Dataset.",
        manual_parameters=[
            openapi.Parameter(
                name="details",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
            )
        ],
    ),
)
@method_decorator(
    name="patch", decorator=swagger_auto_schema(auto_schema=None)
)  # partial updates not supported
@method_decorator(
    name="put",
    decorator=swagger_auto_schema(
        operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
    'data': data,
    'aggregate_by': 'EXPERIMENT',
    'start': True,
    'email_address': 'refinebio@gmail.com'
})
headers = {
    'Content-Type': 'application/json',
    'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
    ),
)
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """

    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = "id"

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        # Filter out downloads by team members and known internal accounts so
        # the Slack bot only announces genuine external users.
        return (
            email is not None
            and email.find("cansav09") != 0
            and email.find("arielsvn") != 0
            and email.find("jaclyn.n.taroni") != 0
            and email.find("kurt.wheeler") != 0
            and email.find("greenescientist") != 0
            and email.find("@alexslemonade.org") == -1
            and email.find("miserlou") != 0
            and email.find("michael.zietz@gmail.com") != 0
            and email.find("d.prasad") != 0
            and email.find("daniel.himmelstein@gmail.com") != 0
            and email.find("dv.prasad991@gmail.com") != 0
        )

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, "token": token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        qn_organisms = Organism.get_objects_with_qn_targets()

        # We convert 'ALL' into the actual accession codes given
        for key in new_data["data"].keys():
            accessions = new_data["data"][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(
                    experiment.samples.filter(
                        is_processed=True, organism__in=qn_organisms
                    ).values_list("accession_code", flat=True)
                )
                new_data["data"][key] = sample_codes

        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")

        if new_data.get("start"):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get("token_id", None)
            if not token_id:
                token_id = self.request.META.get("HTTP_API_KEY", None)
            try:
                token = APIToken.objects.get(id=token_id, is_activated=True)
            except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")

            supplied_email_address = self.request.data.get("email_address", None)
            email_ccdl_ok = self.request.data.get("email_ccdl_ok", False)

            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                obj = serializer.save()
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get("no_send_job", False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException(
                        "Unable to queue download job. Something has gone"
                        " wrong and we have been notified about it."
                    )

                serializer.validated_data["is_processing"] = True
                obj = serializer.save()

                if (
                    settings.RUNNING_IN_CLOUD
                    and settings.ENGAGEMENTBOT_WEBHOOK is not None
                    and DatasetView._should_display_on_engagement_bot(supplied_email_address)
                ):
                    try:
                        try:
                            remote_ip = get_client_ip(self.request)
                            city = requests.get(
                                "https://ipapi.co/" + remote_ip + "/json/", timeout=10
                            ).json()["city"]
                        except Exception:
                            city = "COULD_NOT_DETERMINE"

                        user_agent = self.request.META.get("HTTP_USER_AGENT", None)
                        # BUG FIX: iterate the accession codes in the submitted
                        # dataset (`new_data["data"]`). Iterating
                        # `new_data.values()` directly hit non-iterable entries
                        # such as the `start` flag, raised a TypeError that was
                        # swallowed by the `except` below, and silenced the
                        # EngagementBot post entirely.
                        total_samples = len(
                            set(
                                [
                                    accession_code
                                    for experiment in new_data["data"].values()
                                    for accession_code in experiment
                                ]
                            )
                        )
                        response = requests.post(
                            settings.ENGAGEMENTBOT_WEBHOOK,
                            json={
                                "channel": "ccdl-general",  # Move to robots when we get sick of these
                                "username": "EngagementBot",
                                "icon_emoji": ":halal:",
                                "attachments": [
                                    {
                                        "color": "good",
                                        "title": "New dataset download",
                                        "fallback": "New dataset download",
                                        "title_link": "http://www.refine.bio/dataset/{0}".format(
                                            old_object.id
                                        ),
                                        "text": "New user {0} from {1} downloaded a dataset!".format(
                                            supplied_email_address, city
                                        ),
                                        "footer": "Refine.bio | {0} | {1}".format(
                                            remote_ip, user_agent
                                        ),
                                        "footer_icon": "https://s3.amazonaws.com/refinebio-email/logo-2x.png",
                                        "fields": [
                                            {"title": "Dataset id", "value": str(old_object.id),},
                                            {
                                                "title": "Total downloads",
                                                "value": Dataset.objects.filter(
                                                    email_address=supplied_email_address
                                                ).count(),
                                                "short": True,
                                            },
                                            {
                                                "title": "Samples",
                                                "value": total_samples,
                                                "short": True,
                                            },
                                        ],
                                    }
                                ],
                            },
                            headers={"Content-Type": "application/json"},
                            timeout=10,
                        )
                    except Exception as e:
                        # It doesn't really matter if this didn't work
                        logger.error(e)
                        pass

                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data["data"] = old_data
            serializer.validated_data["aggregate_by"] = old_aggregate
        serializer.save()
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create
    This endpoint can be used to create and activate tokens. These tokens can be used
    in requests that provide urls to download computed files. They are a way to accept
    our terms of service.
    ```py
    import requests
    import json
    response = requests.post('https://api.refine.bio/v1/token/')
    token_id = response.json()['id']
    response = requests.put('https://api.refine.bio/v1/token/' + token_id + '/', json.dumps({'is_activated': True}), headers={'Content-Type': 'application/json'})
    ```
    The token id needs to be provided in the HTTP request in the API-KEY header.
    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """

    # Tokens are created unactivated; activation happens via APITokenView (PUT).
    model = APIToken
    serializer_class = APITokenSerializer
# PATCH is hidden from the generated API docs; only GET and PUT are documented.
@method_decorator(name="patch", decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify Api Tokens.
    get:
    Return details about a specific token.
    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """

    model = APIToken
    lookup_field = "id"
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##
class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """

    model = Experiment
    # Only publicly-visible experiments are listed.
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    # Simple exact-match filtering on these model fields via query parameters.
    filterset_fields = (
        "title",
        "description",
        "accession_code",
        "alternate_accession_code",
        "source_database",
        "source_url",
        "has_publication",
        "publication_title",
        "publication_doi",
        "pubmed_id",
        "organisms",
        "submitter_institution",
        "created_at",
        "last_modified",
        "source_first_published",
        "source_last_modified",
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given its accession code """

    # Experiments are looked up by accession code rather than primary key.
    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="dataset_id",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Filters the result and only returns samples that are added to a dataset.",
            ),
            openapi.Parameter(
                name="experiment_accession_code",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Filters the result and only returns only the samples associated with an experiment accession code.",
            ),
            openapi.Parameter(
                name="accession_codes",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
            ),
        ]
    ),
)
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """

    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = "__all__"
    # Processed samples first by default.
    ordering = "-is_processed"
    filterset_fields = (
        "title",
        "organism",
        "source_database",
        "source_archive_url",
        "has_raw",
        "platform_name",
        "technology",
        "manufacturer",
        "sex",
        "age",
        "specimen_part",
        "genotype",
        "disease",
        "disease_stage",
        "cell_line",
        "treatment",
        "race",
        "subject",
        "compound",
        "time",
        "is_processed",
        "is_public",
    )

    def get_queryset(self):
        """
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        # Prefetch related results/processors/files to avoid N+1 queries when
        # the detailed serializer walks those relations.
        queryset = (
            Sample.public_objects.prefetch_related("organism")
            .prefetch_related("results")
            .prefetch_related("results__processor")
            .prefetch_related("results__computationalresultannotation_set")
            .prefetch_related("results__computedfile_set")
            .filter(**self.get_query_params_filters())
        )

        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        # `filter_by` is a single free-text term matched against many metadata
        # fields at once.
        filter_by = self.request.query_params.get("filter_by", None)
        if filter_by:
            queryset = queryset.filter(
                Q(accession_code__icontains=filter_by)
                | Q(title__icontains=filter_by)
                | Q(sex__icontains=filter_by)
                | Q(age__icontains=filter_by)
                | Q(specimen_part__icontains=filter_by)
                | Q(genotype__icontains=filter_by)
                | Q(disease__icontains=filter_by)
                | Q(disease_stage__icontains=filter_by)
                | Q(cell_line__icontains=filter_by)
                | Q(treatment__icontains=filter_by)
                | Q(race__icontains=filter_by)
                | Q(subject__icontains=filter_by)
                | Q(compound__icontains=filter_by)
                | Q(time__icontains=filter_by)
            )

        return queryset

    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()

        # `ids` is a comma-separated list of sample primary keys.
        ids = self.request.query_params.get("ids", None)
        if ids is not None:
            ids = [int(x) for x in ids.split(",")]
            filter_dict["pk__in"] = ids

        experiment_accession_code = self.request.query_params.get("experiment_accession_code", None)
        if experiment_accession_code:
            experiment = get_object_or_404(
                Experiment.objects.values("id"), accession_code=experiment_accession_code
            )
            filter_dict["experiments__in"] = [experiment["id"]]

        accession_codes = self.request.query_params.get("accession_codes", None)
        if accession_codes:
            accession_codes = accession_codes.split(",")
            filter_dict["accession_code__in"] = accession_codes

        dataset_id = self.request.query_params.get("dataset_id", None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            # Flatten the dataset's {experiment: [sample codes]} mapping.
            filter_dict["accession_code__in"] = [
                item for sublist in dataset.data.values() for item in sublist
            ]

        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get("organism__name", None)
        if organism_name:
            filter_dict["organism__name"] = organism_name

        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given its accession code """

    # Samples are looked up by accession code rather than primary key.
    lookup_field = "accession_code"
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##
class ProcessorList(generics.ListAPIView):
    """List all processors."""

    # Unfiltered, paginated list of every Processor record.
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##
class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list
    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).
    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """

    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        """Use the URL-bearing serializer only when a valid, activated API token is supplied."""
        api_key = self.request.META.get("HTTP_API_KEY", None)
        try:
            APIToken.objects.get(id=api_key, is_activated=True)
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer
        return ComputationalResultWithUrlSerializer

    def filter_queryset(self, queryset):
        """Treat every query parameter except pagination controls as an exact-match filter."""
        query_filters = {
            key: value
            for key, value in self.request.query_params.dict().items()
            if key not in ("limit", "offset")
        }
        return queryset.filter(**query_filters)
##
# Search Filter Models
##
class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """

    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    # Disable pagination: the full organism list is returned in one response.
    paginator = None
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """

    serializer_class = PlatformSerializer
    # The full set of distinct platforms is small enough to return unpaginated.
    paginator = None

    def get_queryset(self):
        """Return the distinct (accession code, name) pairs across all public samples."""
        public_samples = Sample.public_objects.all()
        platform_pairs = public_samples.values("platform_accession_code", "platform_name")
        return platform_pairs.distinct()
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """

    serializer_class = InstitutionSerializer
    # The distinct institution list is small; skip pagination entirely.
    paginator = None

    def get_queryset(self):
        """Return each distinct submitter institution across public experiments."""
        public_experiments = Experiment.public_objects.all()
        return public_experiments.values("submitter_institution").distinct()
##
# Jobs
##
class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """

    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    # Every field exposed by the serializer can be used as a filter.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ("id", "created_at")
    # Newest jobs first by default.
    ordering = ("-id",)
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="sample_accession_code",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="List the downloader jobs associated with a sample",
            ),
            openapi.Parameter(
                name="nomad",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Only return jobs that are in the nomad queue currently",
            ),
        ]
    ),
)
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """

    model = DownloaderJob
    serializer_class = DownloaderJobSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ("id", "created_at")
    ordering = ("-id",)

    def get_queryset(self):
        """Build the job queryset, optionally narrowed by sample or by live Nomad status."""
        jobs = DownloaderJob.objects.all()

        accession_code = self.request.query_params.get("sample_accession_code", None)
        if accession_code:
            jobs = jobs.filter(
                original_files__samples__accession_code=accession_code
            ).distinct()

        if self.request.query_params.get("nomad", None):
            # Keep only jobs whose Nomad counterpart is currently running.
            running_ids = [
                nomad_job["ID"] for nomad_job in get_nomad_jobs() if nomad_job["Status"] == "running"
            ]
            jobs = jobs.filter(nomad_job_id__in=running_ids)

        return jobs
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="sample_accession_code",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="List the processor jobs associated with a sample",
            ),
            openapi.Parameter(
                name="nomad",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Only return jobs that are in the nomad queue currently",
            ),
        ]
    ),
)
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """

    model = ProcessorJob
    serializer_class = ProcessorJobSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ("id", "created_at")
    ordering = ("-id",)

    def get_queryset(self):
        """Build the job queryset, optionally narrowed by sample or by live Nomad status."""
        jobs = ProcessorJob.objects.all()

        sample_accession_code = self.request.query_params.get("sample_accession_code", None)
        if sample_accession_code:
            jobs = jobs.filter(
                original_files__samples__accession_code=sample_accession_code
            ).distinct()

        if self.request.query_params.get("nomad", None):
            # Keep only jobs whose Nomad counterpart is currently running.
            running_ids = []
            for nomad_job in get_nomad_jobs():
                if nomad_job["Status"] == "running":
                    running_ids.append(nomad_job["ID"])
            jobs = jobs.filter(nomad_job_id__in=running_ids)

        return jobs
###
# Statistics
###
def get_start_date(range_param):
    """Translate a range keyword ("day"/"week"/"month"/"year") into a UTC cutoff datetime.

    Returns None for any unrecognized keyword, matching dict.get semantics.
    """
    now = datetime.now(tz=timezone.utc)
    offsets = {
        "day": timedelta(days=1),
        "week": timedelta(weeks=1),
        "month": timedelta(days=30),
        "year": timedelta(days=365),
    }
    offset = offsets.get(range_param)
    return now - offset if offset is not None else None
def paginate_queryset_response(queryset, request):
    """Apply limit/offset pagination to *queryset* and wrap the page in a 200 Response.

    Each item on the page is serialized via its own ``to_dict`` method.
    """
    paginator = LimitOffsetPagination()
    page = paginator.paginate_queryset(queryset, request)
    payload = {
        "results": [item.to_dict() for item in page],
        "limit": paginator.limit,
        "offset": paginator.offset,
        "count": paginator.count,
    }
    return Response(data=payload, status=status.HTTP_200_OK)
class FailedDownloaderJobStats(APIView):
    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="range",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Specify a range from which to calculate the possible options",
                enum=("day", "week", "month", "year",),
            )
        ]
    )
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Group recent downloader jobs by (truncated) failure reason with job/sample counts."""
        selected_range = request.query_params.dict().pop("range", "day")
        cutoff = get_start_date(selected_range)

        recent_jobs = DownloaderJob.objects.filter(created_at__gt=cutoff)
        # Truncate the reason to 80 chars so near-identical messages group together.
        grouped = (
            recent_jobs.annotate(reason=Left("failure_reason", 80))
            .values("reason")
            .annotate(
                job_count=Count("reason"),
                sample_count=Count(
                    "original_files__samples",
                    distinct=True,
                    filter=Q(original_files__samples__is_processed=False),
                ),
            )
            .order_by("-job_count")
        )
        return paginate_queryset_response(grouped, request)
class FailedProcessorJobStats(APIView):
    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="range",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Specify a range from which to calculate the possible options",
                enum=("day", "week", "month", "year",),
            )
        ]
    )
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Group recent processor jobs by (truncated) failure reason with job/sample counts."""
        selected_range = request.query_params.dict().pop("range", "day")
        cutoff = get_start_date(selected_range)

        recent_jobs = ProcessorJob.objects.filter(created_at__gt=cutoff)
        # Truncate the reason to 80 chars so near-identical messages group together.
        grouped = (
            recent_jobs.annotate(reason=Left("failure_reason", 80))
            .values("reason")
            .annotate(
                job_count=Count("reason"),
                sample_count=Count(
                    "original_files__samples",
                    distinct=True,
                    filter=Q(original_files__samples__is_processed=False),
                ),
            )
            .order_by("-job_count")
        )
        return paginate_queryset_response(grouped, request)
class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """

    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Return the headline counts for the about page.

        Passing ``?dummy=1`` returns hard-coded values (snapshot from
        09/25/2019) instead of hitting the database.
        """
        # static values for now
        dummy = request.query_params.dict().pop("dummy", None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                "samples_available": 904953 + 391022,
                "total_size_in_bytes": 832195361132962,
                "supported_organisms": 43 + 159,
                "experiments_processed": 35785 + 8661,
            }
            return Response(result)

        result = {
            "samples_available": self._get_samples_available(),
            "total_size_in_bytes": OriginalFile.objects.aggregate(total_size=Sum("size_in_bytes"))[
                "total_size"
            ],
            "supported_organisms": self._get_supported_organisms(),
            "experiments_processed": self._get_experiments_processed(),
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        experiments_with_sample_processed = (
            Experiment.objects.annotate(
                processed_samples_count=Count("samples", filter=Q(samples__is_processed=True)),
            )
            # BUG FIX: was `__gt=1` ("more than one"), which contradicted the
            # documented intent of "at least one sample processed" and
            # silently excluded single-sample experiments.
            .filter(Q(processed_samples_count__gt=0))
            .count()
        )
        # Experiments whose only output is quant.sf files on otherwise
        # unprocessed RNA-seq samples also count as processed.
        experiments_with_sample_quant = (
            ComputedFile.objects.filter(filename="quant.sf", result__samples__is_processed=False)
            .values_list("result__samples__experiments", flat=True)
            .distinct()
            .count()
        )
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        organisms_without_qn_targets = (
            Organism.objects.filter(
                qn_target__isnull=True,
                sample__is_processed=False,
                sample__technology="RNA-SEQ",
                sample__results__computedfile__filename="quant.sf",
            )
            .distinct()
            .count()
        )
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = (
            Sample.objects.filter(
                is_processed=False, technology="RNA-SEQ", results__computedfile__filename="quant.sf"
            )
            .distinct()
            .count()
        )
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="range",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Specify a range from which to calculate the possible options",
                enum=("day", "week", "month", "year",),
            )
        ]
    )
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        range_param = request.query_params.dict().pop("range", None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)

    @classmethod
    def calculate_stats(cls, range_param):
        """Assemble the full stats payload.

        When `range_param` ("day"/"week"/"month"/"year") is given, per-interval
        timelines and input/output data-size totals are included as well.
        """
        data = {}
        data["generated_on"] = timezone.now()
        data["survey_jobs"] = cls._get_job_stats(SurveyJob.objects, range_param)
        data["downloader_jobs"] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data["processor_jobs"] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data["experiments"] = cls._get_object_stats(Experiment.objects, range_param)
        # processed and unprocessed samples stats
        data["unprocessed_samples"] = cls._get_object_stats(
            Sample.objects.filter(is_processed=False), range_param, "last_modified"
        )
        data["processed_samples"] = cls._get_object_stats(
            Sample.processed_objects, range_param, "last_modified"
        )
        data["processed_samples"]["last_hour"] = cls._samples_processed_last_hour()
        # Breakdown of processed samples by technology, skipping blank values.
        data["processed_samples"]["technology"] = {}
        techs = Sample.processed_objects.values("technology").annotate(count=Count("technology"))
        for tech in techs:
            if not tech["technology"] or not tech["technology"].strip():
                continue
            data["processed_samples"]["technology"][tech["technology"]] = tech["count"]
        # Breakdown of processed samples by organism, skipping null names.
        data["processed_samples"]["organism"] = {}
        organisms = Sample.processed_objects.values("organism__name").annotate(
            count=Count("organism__name")
        )
        for organism in organisms:
            if not organism["organism__name"]:
                continue
            data["processed_samples"]["organism"][organism["organism__name"]] = organism["count"]
        data["processed_experiments"] = cls._get_object_stats(Experiment.processed_public_objects)
        data["active_volumes"] = list(get_active_volumes())
        data["dataset"] = cls._get_dataset_stats(range_param)
        if range_param:
            data["input_data_size"] = cls._get_input_data_size()
            data["output_data_size"] = cls._get_output_data_size()
        data.update(get_nomad_jobs_breakdown())
        return data

    # Datasets created by these usernames (or any @alexslemonade.org address)
    # are internal/test traffic and are excluded from dataset stats.
    EMAIL_USERNAME_BLACKLIST = [
        "arielsvn",
        "cansav09",
        "d.prasad",
        "daniel.himmelstein",
        "dv.prasad991",
        "greenescientist",
        "jaclyn.n.taroni",
        "kurt.wheeler91",
        "michael.zietz",
        "miserlou",
    ]

    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        filter_query = Q()
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith="@alexslemonade.org")
        processed_datasets = Dataset.objects.filter(
            is_processed=True, email_address__isnull=False
        ).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count("id"),
            aggregated_by_experiment=Count("id", filter=Q(aggregate_by="EXPERIMENT")),
            aggregated_by_species=Count("id", filter=Q(aggregate_by="SPECIES")),
            scale_by_none=Count("id", filter=Q(scale_by="NONE")),
            scale_by_minmax=Count("id", filter=Q(scale_by="MINMAX")),
            scale_by_standard=Count("id", filter=Q(scale_by="STANDARD")),
            scale_by_robust=Count("id", filter=Q(scale_by="ROBUST")),
        )
        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result["timeline"] = cls._get_intervals(
                processed_datasets, range_param, "last_modified"
            ).annotate(total=Count("id"), total_size=Sum("size_in_bytes"))
        return result

    @classmethod
    def _samples_processed_last_hour(cls):
        """Number of samples whose `last_modified` falls in the past hour."""
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()

    @classmethod
    def _get_input_data_size(cls):
        """Total size in bytes of original files for processed samples."""
        total_size = OriginalFile.objects.filter(sample__is_processed=True).aggregate(  # <-- SLOW
            Sum("size_in_bytes")
        )
        return total_size["size_in_bytes__sum"] if total_size["size_in_bytes__sum"] else 0

    @classmethod
    def _get_output_data_size(cls):
        """Total size in bytes of public computed files uploaded to S3.

        An uploaded file has both an S3 bucket AND an S3 key; the previous
        filter (`s3_key__isnull=True`) matched only inconsistent records with
        a bucket but no key, so the reported total was wrong.
        """
        total_size = (
            ComputedFile.public_objects.all()
            .filter(s3_bucket__isnull=False, s3_key__isnull=False)
            .aggregate(Sum("size_in_bytes"))
        )
        return total_size["size_in_bytes__sum"] if total_size["size_in_bytes__sum"] else 0

    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate totals (and, with a range, a timeline) for a job queryset."""
        start_filter = Q()
        if range_param:
            start_date = get_start_date(range_param)
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)
        result = jobs.filter(start_filter).aggregate(
            total=Count("id"),
            successful=Count("id", filter=Q(success=True)),
            failed=Count("id", filter=Q(success=False)),
            pending=Count(
                "id",
                filter=Q(
                    start_time__isnull=True,
                    success__isnull=True,
                    created_at__gt=JOB_CREATED_AT_CUTOFF,
                ),
            ),
            open=Count(
                "id",
                filter=Q(
                    start_time__isnull=False,
                    success__isnull=True,
                    created_at__gt=JOB_CREATED_AT_CUTOFF,
                ),
            ),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result["average_time"] = (
            jobs.filter(start_filter)
            .filter(start_time__isnull=False, end_time__isnull=False, success=True)
            .aggregate(average_time=Avg(F("end_time") - F("start_time")))["average_time"]
        )
        if not result["average_time"]:
            result["average_time"] = 0
        else:
            result["average_time"] = result["average_time"].total_seconds()
        if range_param:
            result["timeline"] = cls._get_intervals(jobs, range_param).annotate(
                total=Count("id"),
                successful=Count("id", filter=Q(success=True)),
                failed=Count("id", filter=Q(success=False)),
                pending=Count("id", filter=Q(start_time__isnull=True, success__isnull=True)),
                open=Count("id", filter=Q(start_time__isnull=False, success__isnull=True)),
            )
        return result

    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field="created_at"):
        """Total count (and, with a range, a per-interval timeline on `field`)."""
        result = {"total": objects.count()}
        if range_param:
            result["timeline"] = cls._get_intervals(objects, range_param, field).annotate(
                total=Count("id")
            )
        return result

    @classmethod
    def _get_intervals(cls, objects, range_param, field="last_modified"):
        """Group a queryset into time buckets appropriate for `range_param`."""
        range_to_trunc = {"day": "hour", "week": "day", "month": "day", "year": "month"}
        # truncate the parameterized field so it can be annotated by range
        # ie. each day is composed of 24 hours...
        start_trunc = Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())
        # get the correct start time for the range
        start_range = get_start_date(range_param)
        # annotate and filter in a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=start_trunc).values("start").filter(start__gte=start_range)
###
# Transcriptome Indices
###
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="organism_name",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Organism name. Eg. `MUS_MUSCULUS`",
            ),
            openapi.Parameter(
                name="length",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Short hand for `index_type` Eg. `short` or `long`",
            ),
            openapi.Parameter(
                name="salmon_version",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Eg. `salmon 0.13.1`",
            ),
            openapi.Parameter(
                name="index_type",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Eg. `TRANSCRIPTOME_LONG`",
            ),
        ]
    ),
)
class TranscriptomeIndexList(generics.ListAPIView):
    """
    List all Transcriptome Indices. These are a special type of process result,
    necessary for processing other SRA samples.
    """

    serializer_class = OrganismIndexSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = ["salmon_version", "index_type"]
    ordering_fields = ("created_at", "salmon_version")
    ordering = ("-created_at",)

    def get_queryset(self):
        """Public organism indices, optionally narrowed by query params."""
        queryset = OrganismIndex.public_objects.all()
        # `organism_name` is matched case-insensitively by uppercasing.
        organism_name = self.request.GET.get("organism_name", None)
        if organism_name is not None:
            queryset = queryset.filter(organism__name=organism_name.upper())
        # `length` ("short"/"long") is shorthand for index_type
        # TRANSCRIPTOME_SHORT / TRANSCRIPTOME_LONG.
        length = self.request.GET.get("length", None)
        if length is not None:
            index_type = "TRANSCRIPTOME_{}".format(length.upper())
            queryset = queryset.filter(index_type=index_type)
        return queryset
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="id",
                in_=openapi.IN_PATH,
                type=openapi.TYPE_NUMBER,
                description="Transcriptome Index Id eg `1`",
            ),
        ]
    ),
)
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """

    # Standard retrieve view: looks up a public OrganismIndex by its `id`.
    serializer_class = OrganismIndexSerializer
    lookup_field = "id"
    queryset = OrganismIndex.public_objects.all()
###
# Compendia
###
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="latest_version",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="`True` will only return the highest `compendium_version` for each primary_organism.",
            ),
            openapi.Parameter(
                name="quant_sf_only",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="`True` for RNA-seq Sample Compendium results or `False` for quantile normalized.",
            ),
        ]
    ),
)
class CompendiumResultList(generics.ListAPIView):
    """
    List all CompendiaResults with filtering.
    """

    model = CompendiumResult
    queryset = CompendiumResult.objects.all()
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = ["primary_organism__name", "compendium_version", "quant_sf_only"]
    ordering_fields = ("primary_organism__name", "compendium_version", "id")
    ordering = ("primary_organism__name",)

    def get_queryset(self):
        """Public compendium results; with `latest_version`, only the highest
        compendium_version per (primary_organism, quant_sf_only) pair."""
        public_result_queryset = CompendiumResult.objects.filter(result__is_public=True)
        # NOTE(review): query params are strings, so any non-empty value
        # (including "false") is truthy here — verify against frontend usage.
        latest_version = self.request.query_params.get("latest_version", False)
        if latest_version:
            # Correlated subquery: the highest compendium_version among rows
            # sharing this row's primary_organism and quant_sf_only flag.
            version_filter = Q(
                primary_organism=OuterRef("primary_organism"),
                quant_sf_only=OuterRef("quant_sf_only"),
            )
            latest_version = (
                public_result_queryset.filter(version_filter)
                .order_by("-compendium_version")
                .values("compendium_version")
            )
            return public_result_queryset.annotate(
                latest_version=Subquery(latest_version[:1])
            ).filter(compendium_version=F("latest_version"))
        return public_result_queryset

    def get_serializer_class(self):
        """Use the with-URL serializer only for requests with a valid API token."""
        try:
            token_id = self.request.META.get("HTTP_API_KEY", None)
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return CompendiumResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return CompendiumResultSerializer
class CompendiumResultDetails(generics.RetrieveAPIView):
    """
    Get a specific Compendium Result
    """

    model = CompendiumResult
    queryset = CompendiumResult.objects.filter(is_public=True)
    lookup_field = "id"

    def get_serializer_class(self):
        """Use the with-URL serializer only for requests with a valid API token."""
        try:
            token_id = self.request.META.get("HTTP_API_KEY", None)
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return CompendiumResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return CompendiumResultSerializer
###
# QN Targets
###
class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """

    serializer_class = OrganismSerializer
    # Unpaginated: the full organism list is returned in one response.
    paginator = None

    def get_queryset(self):
        return Organism.get_objects_with_qn_targets()
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="organism_name",
                in_=openapi.IN_PATH,
                type=openapi.TYPE_STRING,
                description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
            )
        ],
        responses={404: "QN Target not found for the given organism."},
    ),
)
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """

    serializer_class = QNTargetSerializer

    def get_object(self):
        """Resolve the most recent QN target file for the organism in the URL.

        Raises NotFound (404) when the organism is unknown or no QN target
        exists for it.
        """
        # Normalize e.g. "danio rerio" -> "DANIO_RERIO".
        organism = self.kwargs["organism_name"]
        organism = organism.upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(organism).id
            # Latest annotation flagged as a QN target for this organism.
            annotation = (
                ComputationalResultAnnotation.objects.filter(
                    data__organism_id=organism_id, data__is_qn=True
                )
                .order_by("-created_at")
                .first()
            )
            qn_target = annotation.result.computedfile_set.first()
        except Exception:
            # Any failure along the chain (unknown organism, no annotation,
            # validation error) is reported uniformly as a 404.
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##
class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list

    ComputedFiles are representation of files created by data-refinery processes.

    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """

    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = (
        "id",
        "samples",
        "is_qn_target",
        "is_smashable",
        "is_qc",
        "is_compendia",
        "quant_sf_only",
        "svd_algorithm",
        "compendia_version",
        "created_at",
        "last_modified",
    )
    ordering_fields = (
        "id",
        "created_at",
        "last_modified",
        "compendia_version",
    )
    ordering = ("-id",)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        # A valid, activated API token in the API-KEY header unlocks extra
        # fields (e.g. download URLs) in the serializer.
        serializer_context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, "token": token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context
class OriginalFileList(generics.ListAPIView):
    """
    original_files_list

    List Original Files that are associated with Samples. These are the files we process.
    """

    queryset = OriginalFile.objects.all()
    serializer_class = OriginalFileListSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    # Every serialized field is filterable.
    filterset_fields = OriginalFileListSerializer.Meta.fields
    ordering_fields = (
        "id",
        "created_at",
        "last_modified",
    )
    ordering = ("-id",)
# error handlers
def handle404error(request, exception):
    """Custom 404 handler.

    Returns a JSON body pointing clients at the docs. Requests whose path is
    not versioned (e.g. missing a /v1/ prefix) get a reminder that all API
    resources are versioned.
    """
    docs_url = "https://api.refine.bio/"
    if match(r"^/v[1-9]/.*", request.path):
        message = "The requested resource was not found on this server."
    else:
        message = "refine.bio API resources are only available through versioned requests."
    return JsonResponse({"message": message, "docs": docs_url, "status_code": 404,}, status=404)
def handle500error(request):
    """Custom 500 handler returning a JSON body instead of an HTML error page."""
    return JsonResponse(
        # Fixed user-facing typo: "occured" -> "occurred".
        {"message": "A server error occurred. This has been reported.", "status_code": 500,},
        status=500,
    )
##
# Util
##
def get_client_ip(request):
    """Best-effort client IP for a request.

    Prefers the first hop of the X-Forwarded-For header (set by the load
    balancer); falls back to REMOTE_ADDR, or "" when neither is present.
    """
    forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
    if not forwarded_for:
        return request.META.get("REMOTE_ADDR", "")
    return forwarded_for.split(",")[0]
<|code_end|>
common/data_refinery_common/models/models.py
<|code_start|>import hashlib
import io
import os
import shutil
import pytz
import uuid
import boto3
from botocore.client import Config
from datetime import datetime
from functools import partial
from typing import Dict, Set
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.expressions import F, Q
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import transaction
from django.db import models
from django.utils import timezone
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.organism import Organism
from data_refinery_common.utils import (
get_env_variable,
get_s3_url,
calculate_file_size,
calculate_sha1,
FileUtils,
)
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client("s3", config=Config(signature_version="s3v4"))
logger = get_and_configure_logger(__name__)
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = "salmon " + get_env_variable("SALMON_VERSION", "0.13.1")
CHUNK_SIZE = 1024 * 256 # chunk_size is in bytes
"""
# First Order Classes
This represent the primary data types we will be querying
and filtering against.
"""
class PublicObjectsManager(models.Manager):
    """
    Only returns objects that have is_public
    """

    def get_queryset(self):
        # Narrow the default queryset to publicly-visible rows.
        return super().get_queryset().filter(is_public=True)
class ProcessedObjectsManager(models.Manager):
    """
    Only returns objects that have is_processed and is_public
    """

    def get_queryset(self):
        # Narrow the default queryset to processed, publicly-visible rows.
        return super().get_queryset().filter(is_processed=True, is_public=True)
class Sample(models.Model):
    """
    An individual sample.
    """

    class Meta:
        db_table = "samples"
        base_manager_name = "public_objects"
        get_latest_by = "created_at"

        indexes = [
            models.Index(fields=["accession_code"]),
        ]

    def __str__(self):
        return self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_objects = ProcessedObjectsManager()

    # Identifiers
    accession_code = models.CharField(max_length=255, unique=True)
    title = models.CharField(max_length=255, unique=False, blank=True)

    # Relations
    organism = models.ForeignKey(Organism, blank=True, null=True, on_delete=models.SET_NULL)
    results = models.ManyToManyField("ComputationalResult", through="SampleResultAssociation")
    original_files = models.ManyToManyField("OriginalFile", through="OriginalFileSampleAssociation")
    computed_files = models.ManyToManyField("ComputedFile", through="SampleComputedFileAssociation")
    experiments = models.ManyToManyField("Experiment", through="ExperimentSampleAssociation")

    # Historical Properties
    source_database = models.CharField(max_length=255, blank=False)
    source_archive_url = models.CharField(max_length=255)
    source_filename = models.CharField(max_length=255, blank=False)
    source_absolute_file_path = models.CharField(max_length=255)
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Technological Properties
    platform_accession_code = models.CharField(max_length=256, blank=True)
    platform_name = models.CharField(max_length=256, blank=True)
    technology = models.CharField(max_length=256, blank=True)  # MICROARRAY, RNA-SEQ
    manufacturer = models.CharField(max_length=256, blank=True)
    protocol_info = JSONField(default=dict)

    # Scientific Properties
    sex = models.CharField(max_length=255, blank=True)
    age = models.DecimalField(max_length=255, blank=True, max_digits=8, decimal_places=3, null=True)
    specimen_part = models.CharField(max_length=255, blank=True)
    genotype = models.CharField(max_length=255, blank=True)
    disease = models.CharField(max_length=255, blank=True)
    disease_stage = models.CharField(max_length=255, blank=True)
    cell_line = models.CharField(max_length=255, blank=True)
    treatment = models.CharField(max_length=255, blank=True)
    race = models.CharField(max_length=255, blank=True)
    subject = models.CharField(max_length=255, blank=True)
    compound = models.CharField(max_length=255, blank=True)
    time = models.CharField(max_length=255, blank=True)

    # Crunch Properties
    is_processed = models.BooleanField(default=False)

    # Blacklisting
    is_blacklisted = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(Sample, self).save(*args, **kwargs)

    def to_metadata_dict(self):
        """Render this Sample as a dict.

        All keys are prefixed with "refinebio_" so downstream metadata files
        can distinguish our fields from source-repository fields.
        """
        metadata = {}
        metadata["refinebio_title"] = self.title
        metadata["refinebio_accession_code"] = self.accession_code
        metadata["refinebio_organism"] = self.organism.name if self.organism else None
        metadata["refinebio_source_database"] = self.source_database
        metadata["refinebio_source_archive_url"] = self.source_archive_url
        metadata["refinebio_sex"] = self.sex
        metadata["refinebio_age"] = self.age or ""
        metadata["refinebio_specimen_part"] = self.specimen_part
        metadata["refinebio_genetic_information"] = self.genotype
        metadata["refinebio_disease"] = self.disease
        metadata["refinebio_disease_stage"] = self.disease_stage
        metadata["refinebio_cell_line"] = self.cell_line
        metadata["refinebio_treatment"] = self.treatment
        metadata["refinebio_race"] = self.race
        metadata["refinebio_subject"] = self.subject
        metadata["refinebio_compound"] = self.compound
        metadata["refinebio_time"] = self.time
        metadata["refinebio_platform"] = self.pretty_platform
        metadata["refinebio_annotations"] = [
            data for data in self.sampleannotation_set.all().values_list("data", flat=True)
        ]

        return metadata

    # Returns a set of ProcessorJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_processor_jobs(self) -> Set:
        """All ProcessorJobs reachable via this sample's original files."""
        processor_jobs = set()
        for original_file in self.original_files.prefetch_related("processor_jobs").all():
            for processor_job in original_file.processor_jobs.all():
                processor_jobs.add(processor_job)

        return processor_jobs

    # Returns a set of DownloaderJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_downloader_jobs(self) -> Set:
        """All DownloaderJobs reachable via this sample's original files."""
        downloader_jobs = set()
        for original_file in self.original_files.prefetch_related("downloader_jobs").all():
            for downloader_job in original_file.downloader_jobs.all():
                downloader_jobs.add(downloader_job)

        return downloader_jobs

    def get_result_files(self):
        """ Get all of the ComputedFile objects associated with this Sample """
        return self.computed_files.all()

    def get_most_recent_smashable_result_file(self):
        """ Get the most recent of the ComputedFile objects associated with this Sample """
        try:
            latest_computed_file = self.computed_files.filter(
                is_public=True, is_smashable=True,
            ).latest()
            return latest_computed_file

        except ComputedFile.DoesNotExist as e:
            # This sample has no smashable files yet.
            return None

    def get_most_recent_quant_sf_file(self):
        """ Returns the latest quant.sf file that was generated for this sample.
        Note: We don't associate that file to the computed_files of this sample, that's
        why we have to go through the computational results. """
        return (
            ComputedFile.objects.filter(
                result__in=self.results.all(),
                filename="quant.sf",
                s3_key__isnull=False,
                s3_bucket__isnull=False,
            )
            .order_by("-created_at")
            .first()
        )

    @property
    def pretty_platform(self):
        """ Turns

        [HT_HG-U133_Plus_PM] Affymetrix HT HG-U133+ PM Array Plate

        into

        Affymetrix HT HG-U133+ PM Array Plate (hthgu133pluspm)
        """
        if "]" in self.platform_name:
            platform_base = self.platform_name.split("]")[1].strip()
        else:
            platform_base = self.platform_name
        return platform_base + " (" + self.platform_accession_code + ")"
class SampleAnnotation(models.Model):
    """ Semi-standard information associated with a Sample """

    class Meta:
        db_table = "sample_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    # Free-form annotation payload from the source repository or our pipeline.
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(SampleAnnotation, self).save(*args, **kwargs)
class ProcessedPublicObjectsManager(models.Manager):
    """
    Only returns Experiments that are is_public and have related is_processed Samples.
    """

    def get_queryset(self):
        # Relies on the cached num_processed_samples counter maintained by
        # Experiment.update_num_samples().
        return super().get_queryset().filter(is_public=True, num_processed_samples__gt=0)
class Experiment(models.Model):
    """ An Experiment or Study """

    class Meta:
        db_table = "experiments"
        base_manager_name = "public_objects"

    def __str__(self):
        return "Experiment: " + self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()

    # Relations
    samples = models.ManyToManyField("Sample", through="ExperimentSampleAssociation")
    organisms = models.ManyToManyField("Organism", through="ExperimentOrganismAssociation")

    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)

    # Historical Properties
    source_database = models.CharField(max_length=32)  # "ArrayExpress, "SRA", "GEO"
    source_url = models.TextField()

    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)

    # Cached Computed Properties
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time

        # GEO and ArrayExpress mirror each other's studies; derive the
        # alternate accession (GSExxx <-> E-GEOD-xxx) when missing.
        if self.accession_code and not self.alternate_accession_code:
            if self.accession_code.startswith("GSE"):
                self.alternate_accession_code = "E-GEOD-" + self.accession_code[3:]
            elif self.accession_code.startswith("E-GEOD-"):
                self.alternate_accession_code = "GSE" + self.accession_code[7:]

        return super(Experiment, self).save(*args, **kwargs)

    def update_num_samples(self):
        """ Update our cache values """
        aggregates = self.samples.aggregate(
            num_total_samples=Count("id"),
            num_processed_samples=Count("id", filter=Q(is_processed=True)),
            num_downloadable_samples=Count(
                "id", filter=Q(is_processed=True, organism__qn_target__isnull=False)
            ),
        )
        self.num_total_samples = aggregates["num_total_samples"]
        self.num_processed_samples = aggregates["num_processed_samples"]
        self.num_downloadable_samples = aggregates["num_downloadable_samples"]
        self.save()

    def to_metadata_dict(self):
        """ Render this Experiment as a dict """
        metadata = {}
        metadata["title"] = self.title
        metadata["accession_code"] = self.accession_code
        metadata["organisms"] = list(self.organisms.all().values_list("name", flat=True))
        metadata["sample_accession_codes"] = list(
            self.samples.all().values_list("accession_code", flat=True)
        )
        metadata["description"] = self.description
        metadata["protocol_description"] = self.protocol_description
        metadata["technology"] = self.technology
        metadata["submitter_institution"] = self.submitter_institution
        metadata["has_publication"] = self.has_publication
        metadata["publication_title"] = self.publication_title
        metadata["publication_doi"] = self.publication_doi
        metadata["pubmed_id"] = self.pubmed_id
        # Dates are serialized as ISO-like strings, or "" when unknown.
        if self.source_first_published:
            metadata["source_first_published"] = self.source_first_published.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_first_published"] = ""
        if self.source_last_modified:
            metadata["source_last_modified"] = self.source_last_modified.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_last_modified"] = ""

        return metadata

    def get_sample_metadata_fields(self):
        """ Get all metadata fields that are non-empty for at least one sample in the experiment.
        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        fields = []

        possible_fields = [
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
        ]
        samples = self.samples.all()
        for field in possible_fields:
            for sample in samples:
                value = getattr(sample, field)
                # Idiom fix: compare against None with `is not None` and
                # avoid the duplicated getattr call.
                if value is not None and value != "":
                    fields.append(field)
                    break

        return fields

    def update_sample_metadata_fields(self):
        self.sample_metadata_fields = self.get_sample_metadata_fields()

    def update_platform_names(self):
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()

    def get_sample_technologies(self):
        """ Get a list of unique technologies for all of the associated samples
        """
        return list({sample.technology for sample in self.samples.all()})

    def get_platform_names(self):
        """ Get a list of unique platforms for all of the associated samples
        """
        return list({sample.platform_name for sample in self.samples.all()})

    def get_platform_accession_codes(self):
        """ Get a list of unique platforms for all of the associated samples
        """
        return list({sample.platform_accession_code for sample in self.samples.all()})

    @property
    def platforms(self):
        """ Returns a list of related pipelines """
        return list({sample.platform_name for sample in self.samples.all()})

    @property
    def pretty_platforms(self):
        """ Returns a prettified list of related pipelines """
        return list({sample.pretty_platform for sample in self.samples.all()})

    @property
    def processed_samples(self):
        """ Accession codes of all processed samples in this experiment """
        return [sample.accession_code for sample in self.samples.all() if sample.is_processed]

    @property
    def organism_names(self):
        """ Get a list of unique organism names that has at least one downloadable sample """
        result = (
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False)
            .values_list("organism__name", flat=True)
            .distinct()
        )
        return list(result)

    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.
        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False).values_list(
                "accession_code", flat=True
            )
        )
class ExperimentAnnotation(models.Model):
    """ Semi-standard information associated with an Experiment """

    class Meta:
        db_table = "experiment_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    # Free-form annotation payload from the source repository or our pipeline.
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ExperimentAnnotation, self).save(*args, **kwargs)
class Pipeline(models.Model):
    """Pipeline that is associated with a series of ComputationalResult records."""

    name = models.CharField(max_length=255)
    # Ordered integer identifiers of the pipeline's steps
    # (presumably Processor ids — confirm against callers).
    steps = ArrayField(models.IntegerField(), default=list)

    class Meta:
        db_table = "pipelines"
class Processor(models.Model):
    """Processor associated with a certain ComputationalResult."""

    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64)
    docker_image = models.CharField(max_length=255)
    # Arbitrary JSON snapshot of the runtime environment; part of the
    # uniqueness constraint below, so identical processors are deduplicated.
    environment = JSONField(default=dict)

    class Meta:
        db_table = "processors"
        unique_together = ("name", "version", "docker_image", "environment")

    def __str__(self):
        return "Processor: %s (version: %s, docker_image: %s)" % (
            self.name,
            self.version,
            self.docker_image,
        )
class ComputationalResult(models.Model):
    """ Meta-information about the output of a computer process. (Ex Salmon) """

    class Meta:
        db_table = "computational_results"
        base_manager_name = "public_objects"

    def __str__(self):
        processor_name_str = ""
        if self.processor:
            processor_name_str = ": " + str(self.processor.name)
        return "ComputationalResult " + str(self.pk) + processor_name_str

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Free-form record of the commands that produced this result.
    commands = ArrayField(models.TextField(), default=list)
    processor = models.ForeignKey(Processor, blank=True, null=True, on_delete=models.CASCADE)
    samples = models.ManyToManyField("Sample", through="SampleResultAssociation")

    # The Organism Index used to process the sample.
    organism_index = models.ForeignKey(
        "OrganismIndex", blank=True, null=True, on_delete=models.SET_NULL
    )
    is_ccdl = models.BooleanField(default=True)

    # Stats
    time_start = models.DateTimeField(blank=True, null=True)
    time_end = models.DateTimeField(blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResult, self).save(*args, **kwargs)

    def remove_computed_files_from_s3(self):
        """ Removes all associated computed files from S3. Use this before deleting a computational result. """
        for computed_file in self.computedfile_set.all():
            computed_file.delete_s3_file()
class ComputationalResultAnnotation(models.Model):
    """ Non-standard information associated with an ComputationalResult """

    class Meta:
        db_table = "computational_result_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # Properties
    # Arbitrary JSON payload; the schema depends on the annotation source.
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResultAnnotation, self).save(*args, **kwargs)
# Compendium Computational Result
class CompendiumResult(models.Model):
    """ Computational Result For A Compendium """

    class Meta:
        db_table = "compendium_results"
        base_manager_name = "public_objects"

    def __str__(self):
        return "CompendiumResult " + str(self.pk)

    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    result = models.ForeignKey(
        ComputationalResult,
        blank=False,
        null=False,
        related_name="compendium_result",
        on_delete=models.CASCADE,
    )
    # The main organism of the compendium; additional organisms are linked
    # through the many-to-many `organisms` relation below.
    primary_organism = models.ForeignKey(
        Organism,
        blank=False,
        null=False,
        related_name="primary_compendium_results",
        on_delete=models.CASCADE,
    )
    organisms = models.ManyToManyField(
        Organism, related_name="compendium_results", through="CompendiumResultOrganismAssociation"
    )

    # Properties
    quant_sf_only = models.BooleanField(default=False)
    compendium_version = models.IntegerField(blank=True, null=True)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="The SVD algorithm that was used to impute the compendium result.",
    )

    # Common Properties
    is_public = models.BooleanField(default=True)

    # helper
    def get_computed_file(self):
        """ Short hand method for getting the computed file for this compendium"""
        return ComputedFile.objects.filter(result=self.result).first()
# TODO
# class Gene(models.Model):
#     """ A representation of a Gene """
#     class Meta:
#         db_table = "genes"
class OrganismIndex(models.Model):
    """ A special type of process result, necessary for processing other SRA samples """

    class Meta:
        db_table = "organism_index"
        base_manager_name = "public_objects"

    def __str__(self):
        return (
            "OrganismIndex "
            + str(self.pk)
            + ": "
            + self.organism.name
            + " ["
            + self.index_type
            + "] - "
            + str(self.salmon_version)
        )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # ex., "TRANSCRIPTOME_LONG", "TRANSCRIPTOME_SHORT"
    index_type = models.CharField(max_length=255)

    # This corresponds to Ensembl's release number:
    # http://ensemblgenomes.org/info/about/release_cycle
    # Determined by hitting:
    # http://rest.ensembl.org/info/software?content-type=application/json
    source_version = models.CharField(max_length=255, default="93")

    # The name of the genome assembly used which corresponds to 'GRCh38' in:
    # ftp://ftp.ensembl.org/pub/release-93/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
    assembly_name = models.CharField(max_length=255, default="UNKNOWN")

    # This matters, for instance salmon 0.9.0 indexes don't work with 0.10.0
    salmon_version = models.CharField(max_length=255, default=CURRENT_SALMON_VERSION)

    # We keep the directory unextracted on the shared filesystem so all
    # Salmon jobs can access it.
    absolute_directory_path = models.CharField(max_length=255, blank=True, null=True, default="")

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def get_computed_file(self):
        """ Short hand method for getting the computed file for this organism index"""
        return self.result.computedfile_set.first()

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OrganismIndex, self).save(*args, **kwargs)
"""
# Files
These are the database representations of files
which live on local disk, on ephemeral storage,
or on AWS cloud services.
"""
class OriginalFile(models.Model):
    """ A representation of a file from an external source """

    class Meta:
        db_table = "original_files"

        indexes = [
            models.Index(fields=["filename"]),
            models.Index(fields=["source_filename"]),
        ]

    def __str__(self):
        return "OriginalFile: " + self.get_display_name()

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # File Properties
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    size_in_bytes = models.BigIntegerField(blank=True, null=True)
    sha1 = models.CharField(max_length=64)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Relations
    samples = models.ManyToManyField("Sample", through="OriginalFileSampleAssociation")
    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobOriginalFileAssociation"
    )
    downloader_jobs = models.ManyToManyField(
        "data_refinery_common.DownloaderJob", through="DownloaderJobOriginalFileAssociation"
    )

    # Historical Properties
    source_url = models.TextField()
    is_archive = models.BooleanField(default=True)
    source_filename = models.CharField(max_length=255, blank=False)

    # Scientific Properties
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Crunch Properties
    is_downloaded = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OriginalFile, self).save(*args, **kwargs)

    def set_downloaded(self, absolute_file_path, filename=None):
        """ Marks the file as downloaded, if `filename` is not provided it will
        be parsed from the `absolute_file_path` """
        self.is_downloaded = True
        self.is_archive = FileUtils.is_archive(absolute_file_path)
        self.absolute_file_path = absolute_file_path
        self.filename = filename if filename else os.path.basename(absolute_file_path)
        self.calculate_size()
        self.calculate_sha1()
        self.save()

    def calculate_sha1(self) -> None:
        """ Calculate the SHA1 value of a given file.
        """
        # NOTE: despite the `-> None` annotation, the computed digest is
        # also returned for caller convenience.
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> None:
        """ Calculate the number of bytes in a given file.
        """
        # NOTE: despite the `-> None` annotation, the computed size is
        # also returned for caller convenience.
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def get_display_name(self):
        """ For dev convenience """
        if not self.filename:
            return self.source_filename
        else:
            return self.filename

    def get_extension(self):
        """ Returns the lowercased extension of the filename
        Thanks to https://stackoverflow.com/a/541408/763705 """
        return FileUtils.get_extension(self.filename)

    def is_blacklisted(self):
        """ True if the file's extension marks it as one we never process. """
        return self.get_extension() in [".xml", ".chp", ".exp"]

    def delete_local_file(self):
        """ Deletes this file from the local file system."""
        try:
            os.remove(self.absolute_file_path)
        except OSError:
            # Already gone (or not removable) — treat as deleted.
            pass
        except TypeError:
            # absolute_file_path was None — nothing to delete.
            pass
        except Exception as e:
            logger.exception(
                "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
            )
        self.is_downloaded = False
        self.save()

    def has_blocking_jobs(self, own_processor_id=None) -> bool:
        """ True if an existing processor job should prevent dispatching another. """
        # If the file has a processor job that should not have been
        # retried, then it still shouldn't be retried.
        # Exclude the ones that were aborted.
        no_retry_processor_jobs = self.processor_jobs.filter(no_retry=True).exclude(abort=True)

        # If the file has a processor job that hasn't even started
        # yet, then it doesn't need another.
        incomplete_processor_jobs = self.processor_jobs.filter(
            end_time__isnull=True, success__isnull=True, retried=False
        )
        if own_processor_id:
            incomplete_processor_jobs = incomplete_processor_jobs.exclude(id=own_processor_id)

        # Check if there's any jobs which should block another
        # processing attempt.
        blocking_jobs = no_retry_processor_jobs | incomplete_processor_jobs
        return blocking_jobs.first() is not None

    def needs_processing(self, own_processor_id=None) -> bool:
        """Returns False if original_file has been or is being processed.

        Returns True otherwise.

        If own_processor_id is supplied then it will be ignored so
        that processor jobs can use this function without their job
        being counted as currently processing this file.
        """
        sample = self.samples.first()
        if not sample:
            return True

        if self.has_blocking_jobs(own_processor_id):
            return False

        if sample.source_database == "SRA":
            computed_file = sample.get_most_recent_smashable_result_file()

            # If there's no smashable file then we should check the quant.sf file.
            if not computed_file:
                computed_file = sample.get_most_recent_quant_sf_file()

            # If there's neither a quant.sf file nor a smashable file
            # then we definitely need to process it.
            if not computed_file:
                return True

            if (
                computed_file.s3_bucket
                and computed_file.s3_key
                and computed_file.result.organism_index is not None
                and computed_file.result.organism_index.salmon_version == CURRENT_SALMON_VERSION
            ):
                # If the file wasn't computed with the latest
                # version of salmon, then it should be rerun
                # with the latest version of salmon.
                return False
        else:
            # If this original_file has multiple samples (is an
            # archive), and any of them haven't been processed, we'll
            # need the entire archive in order to process any of them.
            # A check to not re-processed the already processed
            # samples in the archive will happen elsewhere before
            # dispatching.
            for sample in self.samples.all():
                if not sample.is_processed:
                    return True
                computed_file = sample.get_most_recent_smashable_result_file()
                if not computed_file:
                    return True
                if settings.RUNNING_IN_CLOUD and (
                    computed_file.s3_bucket is None or computed_file.s3_key is None
                ):
                    return True
            return False

        # If we aren't sure, prefer reprocessing over never processing.
        return True

    def needs_downloading(self, own_processor_id=None) -> bool:
        """Determine if a file needs to be downloaded.

        This is true if the file has already been downloaded and lost
        without getting processed.
        """
        # If the file is downloaded and the file actually exists on disk,
        # then it doesn't need to be downloaded.
        if self.absolute_file_path and os.path.exists(self.absolute_file_path):
            return False

        unstarted_downloader_jobs = self.downloader_jobs.filter(
            start_time__isnull=True, success__isnull=True, retried=False
        )

        # If the file has a downloader job that hasn't even started yet,
        # then it doesn't need another.
        if unstarted_downloader_jobs.count() > 0:
            return False

        # If this file has been processed, then it doesn't need to be downloaded again.
        return self.needs_processing(own_processor_id)

    def is_affy_data(self) -> bool:
        """Return true if original_file is a CEL file or a gzipped CEL file.
        """
        upper_name = self.source_filename.upper()
        return (len(upper_name) > 4 and upper_name[-4:] == ".CEL") or (
            len(upper_name) > 7 and upper_name[-7:] == ".CEL.GZ"
        )
class ComputedFile(models.Model):
    """ A representation of a file created by a data-refinery process """

    class Meta:
        db_table = "computed_files"
        get_latest_by = "created_at"

        indexes = [
            models.Index(fields=["filename"]),
        ]

    def __str__(self):
        return "ComputedFile: " + str(self.filename)

    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Object relations
    samples = models.ManyToManyField("Sample", through="SampleComputedFileAssociation")

    # File related
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    # TODO: make this work w/ migrations:
    # absolute_file_path = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField()
    sha1 = models.CharField(max_length=64)

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # Scientific
    is_smashable = models.BooleanField(default=False)
    is_qc = models.BooleanField(default=False)
    is_qn_target = models.BooleanField(default=False)

    # Compendia details
    quant_sf_only = models.BooleanField(default=False)
    is_compendia = models.BooleanField(default=False)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="The SVD algorithm that was used to generate the file.",
    )
    compendia_organism = models.ForeignKey(
        Organism, blank=True, null=True, on_delete=models.CASCADE
    )
    compendia_version = models.IntegerField(blank=True, null=True)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputedFile, self).save(*args, **kwargs)

    def sync_to_s3(self, s3_bucket=None, s3_key=None) -> bool:
        """ Syncs a file to AWS S3.

        Returns True on success (or when not running in the cloud, where the
        upload is skipped entirely); on failure the s3_bucket/s3_key fields
        are cleared in memory and False is returned.
        """
        if not settings.RUNNING_IN_CLOUD:
            return True

        self.s3_bucket = s3_bucket
        self.s3_key = s3_key

        try:
            S3.upload_file(
                self.absolute_file_path,
                s3_bucket,
                s3_key,
                ExtraArgs={"ACL": "public-read", "StorageClass": "STANDARD_IA"},
            )
            self.save()
        except Exception as e:
            logger.exception(
                "Error uploading computed file to S3",
                computed_file_id=self.pk,
                s3_key=self.s3_key,
                s3_bucket=self.s3_bucket,
            )
            self.s3_bucket = None
            self.s3_key = None
            return False

        return True

    def sync_from_s3(self, force=False, path=None):
        """ Downloads a file from S3 to the local file system.
        Returns the absolute file path.
        """
        path = path if path is not None else self.absolute_file_path

        if not settings.RUNNING_IN_CLOUD and not force:
            if os.path.exists(path):
                return path
            else:
                # If the file doesn't exist at path and we're not
                # running in the cloud, then the file is almost
                # certainly at its absolute_file_path because it never got deleted.
                if os.path.exists(self.absolute_file_path):
                    shutil.copyfile(self.absolute_file_path, path)
                    return path
                else:
                    # We don't have the file :(
                    return None

        target_directory = os.path.dirname(path)
        os.makedirs(target_directory, exist_ok=True)

        if not self.s3_bucket or not self.s3_key:
            raise ValueError("Tried to download a computed file with no s3_bucket or s3_key")

        try:
            S3.download_file(self.s3_bucket, self.s3_key, path)

            # Verify sync integrity
            synced_sha1 = calculate_sha1(path)

            if self.sha1 != synced_sha1:
                raise AssertionError("SHA1 of downloaded ComputedFile doesn't match database SHA1!")

            return path
        except Exception as e:
            logger.exception(e, computed_file_id=self.pk)
            return None

    def change_s3_location(self, new_bucket: str, new_key: str) -> bool:
        """Moves the file from its current location in S3.

        The new location will be set based on `new_bucket` and
        `new_key`. The s3_bucket and s3_key properties will be updated
        to reflect this on a successful move.
        """
        old_bucket = self.s3_bucket
        old_key = self.s3_key
        copy_source = {"Bucket": old_bucket, "Key": old_key}
        try:
            response = S3.copy_object(Bucket=new_bucket, CopySource=copy_source, Key=new_key)
        except:
            logger.exception(
                "Could not copy computed file within S3",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        try:
            self.s3_bucket = new_bucket
            self.s3_key = new_key
            self.save()
        except:
            logger.exception(
                "Could not save computed file after it was copied!!!",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        try:
            response = S3.delete_object(Bucket=old_bucket, Key=old_key)
        except:
            logger.exception(
                "Could not delete computed file after it was copied and saved!!!",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        return True

    def calculate_sha1(self) -> None:
        """ Calculate the SHA1 value of a given file.
        """
        # NOTE: despite the `-> None` annotation, the computed digest is
        # also returned for caller convenience.
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> None:
        """ Calculate the number of bytes in a given file.
        """
        # NOTE: despite the `-> None` annotation, the computed size is
        # also returned for caller convenience.
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def delete_local_file(self, force=False):
        """ Deletes a file from the path and actually removes it from the file system."""
        if not settings.RUNNING_IN_CLOUD and not force:
            return

        try:
            os.remove(self.absolute_file_path)
        except OSError:
            # Already gone (or not removable) — treat as deleted.
            pass
        except TypeError:
            # absolute_file_path was None — nothing to delete.
            pass
        except Exception as e:
            logger.exception(
                "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
            )

    def delete_s3_file(self, force=False):
        """ Deletes the S3 copy of this file and clears s3_bucket/s3_key. """
        # If we're not running in the cloud then we shouldn't try to
        # delete something from S3 unless force is set.
        if not settings.RUNNING_IN_CLOUD and not force:
            return False

        try:
            S3.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
        except:
            logger.exception(
                "Failed to delete S3 object for Computed File.",
                computed_file=self.id,
                s3_object=self.s3_key,
            )
            return False

        self.s3_key = None
        self.s3_bucket = None
        self.save()
        return True

    def get_synced_file_path(self, force=False, path=None):
        """ Fetches the absolute file path to this ComputedFile, fetching from S3 if it
        isn't already available locally. """
        if path:
            if os.path.exists(path):
                return path
            else:
                return self.sync_from_s3(force, path)
        else:
            if os.path.exists(self.absolute_file_path):
                return self.absolute_file_path
            else:
                return self.sync_from_s3(force)

    @property
    def s3_url(self):
        """ Render the resulting HTTPS URL for the S3 object."""
        return self.get_s3_url()

    def get_s3_url(self):
        """ Render the resulting HTTPS URL for the S3 object."""
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def download_url(self):
        """ A temporary URL from which the file can be downloaded. """
        return self.create_download_url()

    def create_download_url(self):
        """ Create a temporary URL from which the file can be downloaded."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod="get_object",
                Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
                ExpiresIn=(60 * 60 * 7 * 24),  # 7 days in seconds.
            )
        else:
            return None

    def has_been_log2scaled(self):
        """ Return true if this is a smashable file that has been log2 scaled """
        return self.is_smashable and self.filename.endswith("lengthScaledTPM.tsv")
class Dataset(models.Model):
    """ A Dataset is a desired set of experiments/samples to smash and download """

    AGGREGATE_CHOICES = (("ALL", "All"), ("EXPERIMENT", "Experiment"), ("SPECIES", "Species"))

    SCALE_CHOICES = (
        ("NONE", "None"),
        ("MINMAX", "Minmax"),
        ("STANDARD", "Standard"),
        ("ROBUST", "Robust"),
    )

    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Experiments and samples live here: {'E-ABC-1': ['SAMP1', 'SAMP2']}
    # This isn't going to be queryable, so we can use JSON-in-text, just make
    # sure we validate properly in and out!
    data = JSONField(
        default=dict,
        help_text="This is a dictionary where the keys are experiment accession codes and the values are lists with sample accession codes. Eg: `{'E-ABC-1': ['SAMP1', 'SAMP2']}`",
    )

    # Processing properties
    aggregate_by = models.CharField(
        max_length=255,
        choices=AGGREGATE_CHOICES,
        default="EXPERIMENT",
        help_text="Specifies how samples are [aggregated](http://docs.refine.bio/en/latest/main_text.html#aggregations).",
    )
    scale_by = models.CharField(
        max_length=255,
        choices=SCALE_CHOICES,
        default="NONE",
        help_text="Specifies options for [transformations](http://docs.refine.bio/en/latest/main_text.html#transformations).",
    )
    quantile_normalize = models.BooleanField(
        default=True,
        help_text="Part of the advanced options. Allows [skipping quantile normalization](http://docs.refine.bio/en/latest/faq.html#what-does-it-mean-to-skip-quantile-normalization-for-rna-seq-samples) for RNA-Seq samples.",
    )
    quant_sf_only = models.BooleanField(
        default=False, help_text="Include only quant.sf files in the generated dataset."
    )
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="Specifies choice of SVD algorithm",
    )

    # State properties
    is_processing = models.BooleanField(default=False)  # Data is still editable when False
    is_processed = models.BooleanField(default=False)  # Result has been made
    is_available = models.BooleanField(default=False)  # Result is ready for delivery

    # NOTE(review): the through model below is declared as
    # "ProcessorJobDatasetAssociation" (lowercase "s"); Django resolves model
    # names case-insensitively, so this mismatch works but is worth unifying.
    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobDataSetAssociation"
    )

    # Fail handling
    success = models.NullBooleanField(null=True)
    failure_reason = models.TextField()

    # Delivery properties
    email_address = models.CharField(max_length=255, blank=True, null=True)
    email_ccdl_ok = models.BooleanField(default=False)
    expires_on = models.DateTimeField(blank=True, null=True)

    # Deliverables
    s3_bucket = models.CharField(max_length=255)
    s3_key = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField(
        blank=True,
        null=True,
        default=0,
        help_text="Contains the size in bytes of the processed dataset.",
    )
    sha1 = models.CharField(max_length=64, null=True, default="")

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(Dataset, self).save(*args, **kwargs)

    def get_samples(self):
        """ Returns all of the Sample objects in this Dataset """
        all_samples = []
        for sample_list in self.data.values():
            all_samples = all_samples + sample_list
        # Deduplicate: the same sample may appear under several experiments.
        all_samples = list(set(all_samples))

        return Sample.objects.filter(accession_code__in=all_samples)

    def get_experiments(self):
        """ Returns all of the Experiments objects in this Dataset """
        all_experiments = self.data.keys()
        return Experiment.objects.filter(accession_code__in=all_experiments)

    def get_samples_by_experiment(self):
        """ Returns a dict of sample QuerySets, for samples grouped by experiment. """
        all_samples = {}

        for experiment, samples in self.data.items():
            all_samples[experiment] = Sample.objects.filter(accession_code__in=samples)

        return all_samples

    def get_samples_by_species(self):
        """ Returns a dict of sample QuerySets, for samples grouped by species. """
        by_species = {}
        all_samples = self.get_samples()
        for sample in all_samples:
            if not by_species.get(sample.organism.name, None):
                by_species[sample.organism.name] = [sample]
            else:
                by_species[sample.organism.name].append(sample)

        return by_species

    def get_aggregated_samples(self):
        """ Uses aggregate_by to return a smasher-ready sample dict. """
        if self.aggregate_by == "ALL":
            return {"ALL": self.get_samples()}
        elif self.aggregate_by == "EXPERIMENT":
            return self.get_samples_by_experiment()
        else:
            return self.get_samples_by_species()

    def is_cross_technology(self):
        """ Determine if this involves both Microarray + RNASeq"""
        if len(self.get_samples().values("technology").distinct()) > 1:
            return True
        else:
            return False

    @property
    def download_url(self):
        """ A temporary URL from which the file can be downloaded. """
        return self.create_download_url()

    def create_download_url(self):
        """ Create a temporary URL from which the file can be downloaded."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod="get_object",
                Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
                ExpiresIn=(60 * 60 * 7 * 24),  # 7 days in seconds.
            )
        else:
            return None

    def s3_url(self):
        """ Render the resulting S3 URL """
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def has_email(self):
        """ Returns if the email is set or not """
        return bool(self.email_address)
class APIToken(models.Model):
    """ Required for starting a smash job """

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Activation
    is_activated = models.BooleanField(default=False)

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: record the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(APIToken, self).save(*args, **kwargs)

    @property
    def terms_and_conditions(self):
        """ Returns the terms and conditions text configured in Django settings. """
        return settings.TERMS_AND_CONDITIONS
"""
# Associations
These represent the relationships between items in the other tables.
"""
class ExperimentSampleAssociation(models.Model):
    """Join table linking an Experiment to one of its Samples."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_sample_associations"
        unique_together = ("experiment", "sample")
class ExperimentOrganismAssociation(models.Model):
    """Join table linking an Experiment to an Organism it contains."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_organism_associations"
        unique_together = ("experiment", "organism")
class DownloaderJobOriginalFileAssociation(models.Model):
    """Join table linking a DownloaderJob to an OriginalFile it downloads."""

    downloader_job = models.ForeignKey(
        "data_refinery_common.DownloaderJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "downloaderjob_originalfile_associations"
        unique_together = ("downloader_job", "original_file")
class ProcessorJobOriginalFileAssociation(models.Model):
    """Join table linking a ProcessorJob to an OriginalFile it processes."""

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "processorjob_originalfile_associations"
        unique_together = ("processor_job", "original_file")
class ProcessorJobDatasetAssociation(models.Model):
    """Join table linking a ProcessorJob to the Dataset it operates on.

    NOTE(review): referenced elsewhere as "ProcessorJobDataSetAssociation"
    (capital "S"); Django resolves model names case-insensitively, so it
    works, but the spelling is worth unifying. No `unique_together` here,
    unlike the sibling association models.
    """

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    dataset = models.ForeignKey(Dataset, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_dataset_associations"
class OriginalFileSampleAssociation(models.Model):
    """Join table linking an OriginalFile to a Sample derived from it."""

    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "original_file_sample_associations"
        unique_together = ("original_file", "sample")
class SampleResultAssociation(models.Model):
    """Join table linking a Sample to a ComputationalResult produced from it."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_result_associations"
        unique_together = ("result", "sample")
class SampleComputedFileAssociation(models.Model):
    """Join table linking a Sample to a ComputedFile produced for it."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    computed_file = models.ForeignKey(
        ComputedFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_computed_file_associations"
        unique_together = ("sample", "computed_file")
class ExperimentResultAssociation(models.Model):
    """Join table linking an Experiment to a ComputationalResult."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "experiment_result_associations"
        unique_together = ("result", "experiment")
class CompendiumResultOrganismAssociation(models.Model):
    """Join table linking a CompendiumResult to an Organism it covers."""

    compendium_result = models.ForeignKey(
        CompendiumResult, blank=False, null=False, on_delete=models.CASCADE
    )
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "compendium_result_organism_associations"
        unique_together = ("compendium_result", "organism")
<|code_end|>
| api/data_refinery_api/views.py
<|code_start|>from datetime import timedelta, datetime
import requests
import nomad
from typing import Dict
from itertools import groupby
from re import match
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.db.models import Count, Prefetch, DateTimeField, OuterRef, Subquery
from django.db.models.functions import Trunc, Left
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import (
Http404,
JsonResponse,
)
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_RANGE,
LOOKUP_QUERY_IN,
LOOKUP_QUERY_GT,
)
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from django_elasticsearch_dsl_drf.filter_backends import (
FilteringFilterBackend,
OrderingFilterBackend,
DefaultOrderingFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend,
)
from django_filters.rest_framework import DjangoFilterBackend
from elasticsearch_dsl import TermsFacet
from rest_framework import status, filters, generics
from rest_framework.exceptions import APIException, NotFound
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
ComputationalResultWithUrlSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
CompendiumResultSerializer,
CompendiumResultWithUrlSerializer,
QNTargetSerializer,
ComputedFileListSerializer,
OriginalFileListSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputationalResultAnnotation,
CompendiumResult,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.models.documents import ExperimentDocument
from data_refinery_common.utils import get_active_volumes, get_nomad_jobs_breakdown, get_nomad_jobs
from data_refinery_common.logging import get_and_configure_logger
from .serializers import ExperimentDocumentSerializer
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
##
# ElasticSearch
##
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from six import iteritems
# Module-level structured logger for this views module.
logger = get_and_configure_logger(__name__)

##
# Variables
##

# Cutoff date for job-related queries. NOTE(review): not referenced within this
# part of the module — confirm where it is applied before changing it.
JOB_CREATED_AT_CUTOFF = datetime(2019, 6, 5, tzinfo=timezone.utc)
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    def aggregate(self, request, queryset, view):
        """Attach a `total_samples` metric to every facet bucket.

        Extends FacetedSearchFilterBackend
        (https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19)
        so each bucket also reports how many unique downloadable samples it
        covers, using the indexed downloadable sample accession codes.

        The count comes from ElasticSearch's `cardinality` aggregation, which
        is only approximate:
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        The highest possible precision threshold is used, which may increase
        memory usage.
        """
        facet_map = self.construct_facets(request, view)
        for field_name, facet_conf in iteritems(facet_map):
            aggregation = facet_conf["facet"].get_aggregation()
            bucket = queryset.aggs.bucket(field_name, aggregation)
            bucket.metric(
                "total_samples",
                "cardinality",
                field="downloadable_samples",
                precision_threshold=40000,
            )
        return queryset
##
# ElasticSearch powered Search and Filter
##
@method_decorator(
    name="list",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="technology",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
            ),
            openapi.Parameter(
                name="has_publication",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Filter the results that have associated publications with `?has_publication=true`",
            ),
            openapi.Parameter(
                name="platform",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by platform, this parameter can have multiple values.",
            ),
            openapi.Parameter(
                name="organism",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by organism, this parameter can have multiple values.",
            ),
            openapi.Parameter(
                name="num_processed_samples",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_NUMBER,
                description="Use ElasticSearch queries to specify the number of processed samples of the results",
            ),
        ],
        operation_description="""
Use this endpoint to search among the experiments.
This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)
There's an additional field in the response named `facets` that contain stats on the number of results per filter type.
Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
""",
    ),
)
class ExperimentDocumentView(DocumentViewSet):
    """ ElasticSearch powered experiment search. """

    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended,
    ]

    # Primitive
    lookup_field = "id"

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        "title": {"boost": 10},
        "publication_authors": {"boost": 8},  # "People will search themselves"
        "publication_title": {"boost": 5},
        "submitter_institution": {"boost": 3},
        "description": {"boost": 2},
        "accession_code": None,
        "alternate_accession_code": None,
        "publication_doi": None,
        "pubmed_id": None,
        "sample_metadata_fields": None,
        "platform_names": None,
    }

    # Define filtering fields
    filter_fields = {
        "id": {"field": "_id", "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN],},
        "technology": "technology",
        "has_publication": "has_publication",
        "platform": "platform_accession_codes",
        "organism": "organism_names",
        "num_processed_samples": {
            "field": "num_processed_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
        "num_downloadable_samples": {
            "field": "num_downloadable_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
    }

    # Define ordering fields
    ordering_fields = {
        "id": "id",
        "title": "title.raw",
        "description": "description.raw",
        "num_total_samples": "num_total_samples",
        "num_downloadable_samples": "num_downloadable_samples",
        "source_first_published": "source_first_published",
    }

    # Specify default ordering
    ordering = (
        "_score",
        "-num_total_samples",
        "id",
        "title",
        "description",
        "-source_first_published",
    )

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        "technology": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": True,  # These are enabled by default, which is more expensive but more simple.
        },
        "organism_names": {
            "field": "organism_names",
            "facet": TermsFacet,
            "enabled": True,
            "options": {"size": 999999},
        },
        "platform_accession_codes": {
            "field": "platform_accession_codes",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
            "options": {"size": 999999},
        },
        "has_publication": {
            "field": "has_publication",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        "technology_global": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
        "organism_names_global": {
            "field": "organism_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        "platform_names_global": {
            "field": "platform_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        # NOTE(review): this facet is named "has_publication_global" but
        # aggregates on "platform_names" — looks like a copy/paste slip;
        # confirm intended field before relying on it.
        "has_publication_global": {
            "field": "platform_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
    }
    faceted_search_param = "facet"

    def list(self, request, *args, **kwargs):
        # Reshape the raw ES facet payload into the simpler mapping the
        # frontend consumes before returning the response.
        # NOTE(review): args/kwargs are forwarded positionally (not unpacked);
        # harmless if the parent ignores them, but worth confirming.
        response = super(ExperimentDocumentView, self).list(request, args, kwargs)
        response.data["facets"] = self.transform_es_facets(response.data["facets"])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:
        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }
        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet["buckets"]:
                # Boolean facets expose their key via "key_as_string" ("true"/"false").
                if field == "has_publication":
                    filter_group[bucket["key_as_string"]] = bucket["total_samples"]["value"]
                else:
                    filter_group[bucket["key"]] = bucket["total_samples"]["value"]
            result[field] = filter_group
        return result
##
# Dataset
##
class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Datasets. """

    # Datasets are created empty here; processing is kicked off later via the
    # `start` flag on a PUT to DatasetView.
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        operation_description="View a single Dataset.",
        manual_parameters=[
            openapi.Parameter(
                name="details",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
            )
        ],
    ),
)
@method_decorator(
    name="patch", decorator=swagger_auto_schema(auto_schema=None)
)  # partial updates not supported
@method_decorator(
    name="put",
    decorator=swagger_auto_schema(
        operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
    'data': data,
    'aggregate_by': 'EXPERIMENT',
    'start': True,
    'email_address': 'refinebio@gmail.com'
})
headers = {
    'Content-Type': 'application/json',
    'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
    ),
)
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """

    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = "id"

    @staticmethod
    def _should_display_on_engagement_bot(email: str) -> bool:
        # Suppress Slack notifications for team members and known internal
        # addresses so the engagement channel only shows real users.
        return (
            email is not None
            and email.find("cansav09") != 0
            and email.find("arielsvn") != 0
            and email.find("jaclyn.n.taroni") != 0
            and email.find("kurt.wheeler") != 0
            and email.find("greenescientist") != 0
            and email.find("@alexslemonade.org") == -1
            and email.find("miserlou") != 0
            and email.find("michael.zietz@gmail.com") != 0
            and email.find("d.prasad") != 0
            and email.find("daniel.himmelstein@gmail.com") != 0
            and email.find("dv.prasad991@gmail.com") != 0
        )

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the activated APIToken (from the `API-KEY` header) to the context
        when one exists, so the serializer can expose token-gated fields.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, "token": token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        qn_organisms = Organism.get_objects_with_qn_targets()

        # We convert 'ALL' into the actual accession codes given
        for key in new_data["data"].keys():
            accessions = new_data["data"][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(
                    experiment.samples.filter(
                        is_processed=True, organism__in=qn_organisms
                    ).values_list("accession_code", flat=True)
                )
                new_data["data"][key] = sample_codes

        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")

        if new_data.get("start"):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get("token_id", None)
            if not token_id:
                token_id = self.request.META.get("HTTP_API_KEY", None)
            try:
                token = APIToken.objects.get(id=token_id, is_activated=True)
            except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")

            supplied_email_address = self.request.data.get("email_address", None)
            email_ccdl_ok = self.request.data.get("email_ccdl_ok", False)

            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                obj = serializer.save()
                # Persist the requester's email preferences before dispatch so
                # notification emails can be sent when the job completes.
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get("no_send_job", False):
                        # NOTE(review): send_job is expected to record the
                        # dispatched job's nomad_job_id on the ProcessorJob —
                        # confirm, since jobs created through other paths
                        # (e.g. COMPENDIA) have been observed without one.
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException(
                        "Unable to queue download job. Something has gone"
                        " wrong and we have been notified about it."
                    )

                serializer.validated_data["is_processing"] = True
                obj = serializer.save()

                # Best-effort Slack notification about the new download;
                # failures here never affect the API response.
                if (
                    settings.RUNNING_IN_CLOUD
                    and settings.ENGAGEMENTBOT_WEBHOOK is not None
                    and DatasetView._should_display_on_engagement_bot(supplied_email_address)
                ):
                    try:
                        try:
                            remote_ip = get_client_ip(self.request)
                            city = requests.get(
                                "https://ipapi.co/" + remote_ip + "/json/", timeout=10
                            ).json()["city"]
                        except Exception:
                            city = "COULD_NOT_DETERMINE"

                        user_agent = self.request.META.get("HTTP_USER_AGENT", None)
                        total_downloads = Dataset.objects.filter(
                            email_address=supplied_email_address
                        ).count()
                        requests.post(
                            settings.ENGAGEMENTBOT_WEBHOOK,
                            json={
                                "channel": "ccdl-general",  # Move to robots when we get sick of these
                                "username": "EngagementBot",
                                "icon_emoji": ":halal:",
                                "attachments": [
                                    {
                                        "color": "good",
                                        "title": "New dataset download",
                                        "fallback": "New dataset download",
                                        "title_link": "http://www.refine.bio/dataset/{0}".format(
                                            obj.id
                                        ),
                                        "text": "New user {0} from {1} downloaded a dataset!".format(
                                            supplied_email_address, city
                                        ),
                                        "footer": "Refine.bio | {0} | {1}".format(
                                            remote_ip, user_agent
                                        ),
                                        "footer_icon": "https://s3.amazonaws.com/refinebio-email/logo-2x.png",
                                        "fields": [
                                            {"title": "Dataset id", "value": str(obj.id),},
                                            {
                                                "title": "Total downloads",
                                                "value": total_downloads,
                                                "short": True,
                                            },
                                            {
                                                "title": "Samples",
                                                "value": obj.get_total_samples(),
                                                "short": True,
                                            },
                                        ],
                                    }
                                ],
                            },
                            headers={"Content-Type": "application/json"},
                            timeout=10,
                        )
                    except Exception as e:
                        # It doesn't really matter if this didn't work
                        logger.error(e)
                        pass

                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data["data"] = old_data
            serializer.validated_data["aggregate_by"] = old_aggregate
        serializer.save()
class CreateApiTokenView(generics.CreateAPIView):
    """
    token_create

    This endpoint can be used to create and activate tokens. These tokens can be used
    in requests that provide urls to download computed files. They are a way to accept
    our terms of service.

    ```py
    import requests
    import json
    response = requests.post('https://api.refine.bio/v1/token/')
    token_id = response.json()['id']
    response = requests.put('https://api.refine.bio/v1/token/' + token_id + '/', json.dumps({'is_activated': True}), headers={'Content-Type': 'application/json'})
    ```

    The token id needs to be provided in the HTTP request in the API-KEY header.

    References
    - [https://github.com/AlexsLemonade/refinebio/issues/731]()
    - [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
    """

    # POST creates an (unactivated) APIToken; activation happens via APITokenView.
    model = APIToken
    serializer_class = APITokenSerializer
# PATCH is hidden from the generated docs; clients activate tokens via PUT.
@method_decorator(name="patch", decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
    """
    Read and modify Api Tokens.

    get:
    Return details about a specific token.

    put:
    This can be used to activate a specific token by sending `is_activated: true`.
    """

    model = APIToken
    lookup_field = "id"
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
##
# Experiments
##
class ExperimentList(generics.ListAPIView):
    """ Paginated list of all experiments. Advanced filtering can be done with the `/search` endpoint. """

    model = Experiment
    queryset = Experiment.public_objects.all()
    serializer_class = ExperimentSerializer
    filter_backends = (DjangoFilterBackend,)
    # Exact-match query-parameter filters; full-text search lives in ExperimentDocumentView.
    filterset_fields = (
        "title",
        "description",
        "accession_code",
        "alternate_accession_code",
        "source_database",
        "source_url",
        "has_publication",
        "publication_title",
        "publication_doi",
        "pubmed_id",
        "organisms",
        "submitter_institution",
        "created_at",
        "last_modified",
        "source_first_published",
        "source_last_modified",
    )
class ExperimentDetail(generics.RetrieveAPIView):
    """ Retrieve details for an experiment given its accession code """

    lookup_field = "accession_code"
    queryset = Experiment.public_objects.all()
    serializer_class = DetailedExperimentSerializer
##
# Samples
##
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="dataset_id",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Filters the result and only returns samples that are added to a dataset.",
            ),
            openapi.Parameter(
                name="experiment_accession_code",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Filters the result and only returns only the samples associated with an experiment accession code.",
            ),
            openapi.Parameter(
                name="accession_codes",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Provide a list of sample accession codes separated by commas and the endpoint will only return information about these samples.",
            ),
        ]
    ),
)
class SampleList(generics.ListAPIView):
    """ Returns detailed information about Samples """

    model = Sample
    serializer_class = DetailedSampleSerializer
    filter_backends = (filters.OrderingFilter, DjangoFilterBackend)
    ordering_fields = "__all__"
    ordering = "-is_processed"
    filterset_fields = (
        "title",
        "organism",
        "source_database",
        "source_archive_url",
        "has_raw",
        "platform_name",
        "technology",
        "manufacturer",
        "sex",
        "age",
        "specimen_part",
        "genotype",
        "disease",
        "disease_stage",
        "cell_line",
        "treatment",
        "race",
        "subject",
        "compound",
        "time",
        "is_processed",
        "is_public",
    )

    def get_queryset(self):
        """
        Build the base queryset, applying the custom query-parameter filters
        and an optional case-insensitive `filter_by` text search.

        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        # Prefetch the relations the detailed serializer walks to avoid N+1 queries.
        queryset = (
            Sample.public_objects.prefetch_related("organism")
            .prefetch_related("results")
            .prefetch_related("results__processor")
            .prefetch_related("results__computationalresultannotation_set")
            .prefetch_related("results__computedfile_set")
            .filter(**self.get_query_params_filters())
        )

        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get("filter_by", None)
        if filter_by:
            queryset = queryset.filter(
                Q(accession_code__icontains=filter_by)
                | Q(title__icontains=filter_by)
                | Q(sex__icontains=filter_by)
                | Q(age__icontains=filter_by)
                | Q(specimen_part__icontains=filter_by)
                | Q(genotype__icontains=filter_by)
                | Q(disease__icontains=filter_by)
                | Q(disease_stage__icontains=filter_by)
                | Q(cell_line__icontains=filter_by)
                | Q(treatment__icontains=filter_by)
                | Q(race__icontains=filter_by)
                | Q(subject__icontains=filter_by)
                | Q(compound__icontains=filter_by)
                | Q(time__icontains=filter_by)
            )

        return queryset

    def get_query_params_filters(self):
        """ We do advanced filtering on the queryset depending on the query parameters.
        This returns the parameters that should be used for that. """
        filter_dict = dict()

        ids = self.request.query_params.get("ids", None)
        if ids is not None:
            ids = [int(x) for x in ids.split(",")]
            filter_dict["pk__in"] = ids

        experiment_accession_code = self.request.query_params.get("experiment_accession_code", None)
        if experiment_accession_code:
            experiment = get_object_or_404(
                Experiment.objects.values("id"), accession_code=experiment_accession_code
            )
            filter_dict["experiments__in"] = [experiment["id"]]

        accession_codes = self.request.query_params.get("accession_codes", None)
        if accession_codes:
            accession_codes = accession_codes.split(",")
            filter_dict["accession_code__in"] = accession_codes

        # NOTE: when both are supplied, dataset_id overwrites the
        # accession_codes filter — both set "accession_code__in".
        dataset_id = self.request.query_params.get("dataset_id", None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            filter_dict["accession_code__in"] = [
                item for sublist in dataset.data.values() for item in sublist
            ]

        # Accept Organism in both name and ID form
        organism_name = self.request.query_params.get("organism__name", None)
        if organism_name:
            filter_dict["organism__name"] = organism_name

        return filter_dict
class SampleDetail(generics.RetrieveAPIView):
    """ Retrieve the details for a Sample given its accession code """

    lookup_field = "accession_code"
    queryset = Sample.public_objects.all()
    serializer_class = DetailedSampleSerializer
##
# Processor
##
class ProcessorList(generics.ListAPIView):
    """List all processors."""

    # Unfiltered, paginated list of every Processor record.
    queryset = Processor.objects.all()
    serializer_class = ProcessorSerializer
##
# Results
##
class ComputationalResultsList(generics.ListAPIView):
    """
    computational_results_list

    This lists all `ComputationalResult`. Each one contains meta-information about the output of a computer process. (Ex Salmon).

    This can return valid S3 urls if a valid [token](#tag/token) is sent in the header `HTTP_API_KEY`.
    """

    queryset = ComputationalResult.public_objects.all()

    def get_serializer_class(self):
        # A valid, activated API token unlocks the serializer that includes S3 URLs.
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return ComputationalResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return ComputationalResultSerializer

    def filter_queryset(self, queryset):
        # NOTE(review): query params are passed straight into .filter(); an
        # unrecognized field name will raise FieldError (HTTP 500) — consider
        # validating the keys.
        filter_dict = self.request.query_params.dict()
        filter_dict.pop("limit", None)  # consumed by the paginator
        filter_dict.pop("offset", None)  # consumed by the paginator
        return queryset.filter(**filter_dict)
##
# Search Filter Models
##
class OrganismList(generics.ListAPIView):
    """
    Unpaginated list of all the available organisms.
    """

    queryset = Organism.objects.all()
    serializer_class = OrganismSerializer
    paginator = None  # disable pagination: return the full list in one response
class PlatformList(generics.ListAPIView):
    """
    Unpaginated list of all the available "platform" information
    """

    serializer_class = PlatformSerializer
    paginator = None  # disable pagination: return the full list in one response

    def get_queryset(self):
        # Distinct (accession code, name) pairs across all public samples.
        return (
            Sample.public_objects.all()
            .values("platform_accession_code", "platform_name")
            .distinct()
        )
class InstitutionList(generics.ListAPIView):
    """
    Unpaginated list of all the available "institution" information
    """

    serializer_class = InstitutionSerializer
    paginator = None  # disable pagination: return the full list in one response

    def get_queryset(self):
        # Distinct submitter institutions across all public experiments.
        return Experiment.public_objects.all().values("submitter_institution").distinct()
##
# Jobs
##
class SurveyJobList(generics.ListAPIView):
    """
    List of all SurveyJob.
    """

    model = SurveyJob
    queryset = SurveyJob.objects.all()
    serializer_class = SurveyJobSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    # Every serialized field is filterable via query parameters.
    filterset_fields = SurveyJobSerializer.Meta.fields
    ordering_fields = ("id", "created_at")
    ordering = ("-id",)  # newest jobs first by default
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="sample_accession_code",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="List the downloader jobs associated with a sample",
            ),
            openapi.Parameter(
                name="nomad",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Only return jobs that are in the nomad queue currently",
            ),
        ]
    ),
)
class DownloaderJobList(generics.ListAPIView):
    """
    List of all DownloaderJob
    """

    model = DownloaderJob
    serializer_class = DownloaderJobSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = DownloaderJobSerializer.Meta.fields
    ordering_fields = ("id", "created_at")
    ordering = ("-id",)

    def get_queryset(self):
        queryset = DownloaderJob.objects.all()

        # Restrict to jobs tied (via their original files) to a single sample.
        sample_accession_code = self.request.query_params.get("sample_accession_code", None)
        if sample_accession_code:
            queryset = queryset.filter(
                original_files__samples__accession_code=sample_accession_code
            ).distinct()

        # Restrict to jobs whose nomad_job_id matches a currently-running
        # Nomad job. Jobs whose nomad_job_id was never populated can never
        # match this filter.
        nomad = self.request.query_params.get("nomad", None)
        if nomad:
            running_nomad_jobs_ids = [
                job["ID"] for job in get_nomad_jobs() if job["Status"] == "running"
            ]
            queryset = queryset.filter(nomad_job_id__in=running_nomad_jobs_ids)

        return queryset
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="sample_accession_code",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="List the processor jobs associated with a sample",
            ),
            openapi.Parameter(
                name="nomad",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Only return jobs that are in the nomad queue currently",
            ),
        ]
    ),
)
class ProcessorJobList(generics.ListAPIView):
    """
    List of all ProcessorJobs.
    """

    model = ProcessorJob
    serializer_class = ProcessorJobSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = ProcessorJobSerializer.Meta.fields
    ordering_fields = ("id", "created_at")
    ordering = ("-id",)

    def get_queryset(self):
        queryset = ProcessorJob.objects.all()

        # Restrict to jobs tied (via their original files) to a single sample.
        sample_accession_code = self.request.query_params.get("sample_accession_code", None)
        if sample_accession_code:
            queryset = queryset.filter(
                original_files__samples__accession_code=sample_accession_code
            ).distinct()

        # Restrict to jobs whose nomad_job_id matches a currently-running
        # Nomad job. NOTE(review): ProcessorJobs whose nomad_job_id was never
        # set (observed for COMPENDIA jobs) are invisible to this filter.
        nomad = self.request.query_params.get("nomad", None)
        if nomad:
            running_nomad_jobs_ids = [
                job["ID"] for job in get_nomad_jobs() if job["Status"] == "running"
            ]
            queryset = queryset.filter(nomad_job_id__in=running_nomad_jobs_ids)

        return queryset
###
# Statistics
###
def get_start_date(range_param):
    """Return the UTC datetime marking the start of the requested range.

    Supported values for *range_param* are "day", "week", "month" (30 days),
    and "year" (365 days); anything else yields None.
    """
    now = datetime.now(tz=timezone.utc)
    deltas = {
        "day": timedelta(days=1),
        "week": timedelta(weeks=1),
        "month": timedelta(days=30),
        "year": timedelta(days=365),
    }
    delta = deltas.get(range_param)
    return None if delta is None else now - delta
def paginate_queryset_response(queryset, request):
    """Apply limit/offset pagination to *queryset* and wrap it in a Response.

    Each page item is serialized via its own `to_dict()`; the paginator's
    limit, offset, and total count are echoed back alongside the results.
    """
    paginator = LimitOffsetPagination()
    page_items = paginator.paginate_queryset(queryset, request)
    payload = {
        "results": [item.to_dict() for item in page_items],
        "limit": paginator.limit,
        "offset": paginator.offset,
        "count": paginator.count,
    }
    return Response(data=payload, status=status.HTTP_200_OK)
class FailedDownloaderJobStats(APIView):
    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="range",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Specify a range from which to calculate the possible options",
                enum=("day", "week", "month", "year",),
            )
        ]
    )
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Aggregate recent DownloaderJob failures, grouped by failure reason."""
        range_param = request.query_params.dict().pop("range", "day")
        start_date = get_start_date(range_param)
        jobs = (
            DownloaderJob.objects.filter(created_at__gt=start_date)
            # Group on the first 80 characters so near-identical messages collapse.
            .annotate(reason=Left("failure_reason", 80))
            .values("reason")
            .annotate(
                job_count=Count("reason"),
                # Unique, still-unprocessed samples affected by each failure group.
                sample_count=Count(
                    "original_files__samples",
                    distinct=True,
                    filter=Q(original_files__samples__is_processed=False),
                ),
            )
            .order_by("-job_count")
        )
        return paginate_queryset_response(jobs, request)
class FailedProcessorJobStats(APIView):
    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="range",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Specify a range from which to calculate the possible options",
                enum=("day", "week", "month", "year",),
            )
        ]
    )
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Aggregate recent ProcessorJob failures, grouped by failure reason."""
        range_param = request.query_params.dict().pop("range", "day")
        start_date = get_start_date(range_param)
        jobs = (
            ProcessorJob.objects.filter(created_at__gt=start_date)
            # Group on the first 80 characters so near-identical messages collapse.
            .annotate(reason=Left("failure_reason", 80))
            .values("reason")
            .annotate(
                job_count=Count("reason"),
                # Unique, still-unprocessed samples affected by each failure group.
                sample_count=Count(
                    "original_files__samples",
                    distinct=True,
                    filter=Q(original_files__samples__is_processed=False),
                ),
            )
            .order_by("-job_count")
        )
        return paginate_queryset_response(jobs, request)
class AboutStats(APIView):
    """ Returns general stats for the site, used in the about page """

    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        """Return totals for samples, data volume, organisms, and experiments.

        Passing `?dummy=<anything>` returns hard-coded values (snapshot taken
        on 09/25/2019) so the endpoint can be exercised without touching the
        database.
        """
        # static values for now
        dummy = request.query_params.dict().pop("dummy", None)
        if dummy:
            # add a dummy response, calculated these on 09/25/2019
            result = {
                "samples_available": 904953 + 391022,
                "total_size_in_bytes": 832195361132962,
                "supported_organisms": 43 + 159,
                "experiments_processed": 35785 + 8661,
            }
            return Response(result)

        result = {
            "samples_available": self._get_samples_available(),
            "total_size_in_bytes": OriginalFile.objects.aggregate(total_size=Sum("size_in_bytes"))[
                "total_size"
            ],
            "supported_organisms": self._get_supported_organisms(),
            "experiments_processed": self._get_experiments_processed(),
        }
        return Response(result)

    def _get_experiments_processed(self):
        """ total experiments with at least one sample processed """
        experiments_with_sample_processed = (
            Experiment.objects.annotate(
                processed_samples_count=Count("samples", filter=Q(samples__is_processed=True)),
            )
            # __gt=0 so experiments with exactly one processed sample count too.
            # (Was __gt=1, which contradicted this method's docstring by
            # excluding experiments with a single processed sample.)
            .filter(Q(processed_samples_count__gt=0))
            .count()
        )
        # Experiments whose samples aren't marked processed but do have a
        # quant.sf output file.
        experiments_with_sample_quant = (
            ComputedFile.objects.filter(filename="quant.sf", result__samples__is_processed=False)
            .values_list("result__samples__experiments", flat=True)
            .distinct()
            .count()
        )
        return experiments_with_sample_processed + experiments_with_sample_quant

    def _get_supported_organisms(self):
        """ count organisms with qn targets or that have at least one sample with quant files """
        organisms_with_qn_targets = Organism.objects.filter(qn_target__isnull=False).count()
        organisms_without_qn_targets = (
            Organism.objects.filter(
                qn_target__isnull=True,
                sample__is_processed=False,
                sample__technology="RNA-SEQ",
                sample__results__computedfile__filename="quant.sf",
            )
            .distinct()
            .count()
        )
        return organisms_with_qn_targets + organisms_without_qn_targets

    def _get_samples_available(self):
        """ count the total number of samples that are processed or that have a quant.sf file associated with them """
        processed_samples = Sample.objects.filter(is_processed=True).count()
        unprocessed_samples_with_quant = (
            Sample.objects.filter(
                is_processed=False, technology="RNA-SEQ", results__computedfile__filename="quant.sf"
            )
            .distinct()
            .count()
        )
        return processed_samples + unprocessed_samples_with_quant
class Stats(APIView):
    """ Statistics about the health of the system. """

    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="range",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Specify a range from which to calculate the possible options",
                enum=("day", "week", "month", "year",),
            )
        ]
    )
    @method_decorator(cache_page(10 * 60))
    def get(self, request, version, format=None):
        # `range` limits the time window for the bucketed timelines below.
        range_param = request.query_params.dict().pop("range", None)
        cached_stats = Stats.calculate_stats(range_param)
        return Response(cached_stats)

    @classmethod
    def calculate_stats(cls, range_param):
        """Assemble the full stats payload: job stats, sample/experiment
        counts, dataset stats, and the Nomad job breakdown."""
        data = {}
        data["generated_on"] = timezone.now()
        data["survey_jobs"] = cls._get_job_stats(SurveyJob.objects, range_param)
        data["downloader_jobs"] = cls._get_job_stats(DownloaderJob.objects, range_param)
        data["processor_jobs"] = cls._get_job_stats(ProcessorJob.objects, range_param)
        data["experiments"] = cls._get_object_stats(Experiment.objects, range_param)
        # processed and unprocessed samples stats
        data["unprocessed_samples"] = cls._get_object_stats(
            Sample.objects.filter(is_processed=False), range_param, "last_modified"
        )
        data["processed_samples"] = cls._get_object_stats(
            Sample.processed_objects, range_param, "last_modified"
        )
        data["processed_samples"]["last_hour"] = cls._samples_processed_last_hour()
        # Break processed samples down per technology and per organism.
        data["processed_samples"]["technology"] = {}
        techs = Sample.processed_objects.values("technology").annotate(count=Count("technology"))
        for tech in techs:
            # Skip empty or whitespace-only technology values.
            if not tech["technology"] or not tech["technology"].strip():
                continue
            data["processed_samples"]["technology"][tech["technology"]] = tech["count"]
        data["processed_samples"]["organism"] = {}
        organisms = Sample.processed_objects.values("organism__name").annotate(
            count=Count("organism__name")
        )
        for organism in organisms:
            if not organism["organism__name"]:
                continue
            data["processed_samples"]["organism"][organism["organism__name"]] = organism["count"]
        data["processed_experiments"] = cls._get_object_stats(Experiment.processed_public_objects)
        data["active_volumes"] = list(get_active_volumes())
        data["dataset"] = cls._get_dataset_stats(range_param)
        if range_param:
            # These two aggregates are slow, so only compute them when a
            # range was explicitly requested.
            data["input_data_size"] = cls._get_input_data_size()
            data["output_data_size"] = cls._get_output_data_size()
        data.update(get_nomad_jobs_breakdown())
        return data

    # Internal/testing accounts excluded from dataset stats below.
    EMAIL_USERNAME_BLACKLIST = [
        "arielsvn",
        "cansav09",
        "d.prasad",
        "daniel.himmelstein",
        "dv.prasad991",
        "greenescientist",
        "jaclyn.n.taroni",
        "kurt.wheeler91",
        "michael.zietz",
        "miserlou",
    ]

    @classmethod
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        filter_query = Q()
        # Exclude datasets created by team members (by username prefix or
        # the alexslemonade.org domain).
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(email_address__endswith="@alexslemonade.org")
        processed_datasets = Dataset.objects.filter(
            is_processed=True, email_address__isnull=False
        ).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count("id"),
            aggregated_by_experiment=Count("id", filter=Q(aggregate_by="EXPERIMENT")),
            aggregated_by_species=Count("id", filter=Q(aggregate_by="SPECIES")),
            scale_by_none=Count("id", filter=Q(scale_by="NONE")),
            scale_by_minmax=Count("id", filter=Q(scale_by="MINMAX")),
            scale_by_standard=Count("id", filter=Q(scale_by="STANDARD")),
            scale_by_robust=Count("id", filter=Q(scale_by="ROBUST")),
        )
        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result["timeline"] = cls._get_intervals(
                processed_datasets, range_param, "last_modified"
            ).annotate(total=Count("id"), total_size=Sum("size_in_bytes"))
        return result

    @classmethod
    def _samples_processed_last_hour(cls):
        """Count samples whose last_modified falls within the past hour."""
        current_date = datetime.now(tz=timezone.utc)
        start = current_date - timedelta(hours=1)
        return Sample.processed_objects.filter(last_modified__range=(start, current_date)).count()

    @classmethod
    def _get_input_data_size(cls):
        """Total byte size of original files belonging to processed samples."""
        total_size = OriginalFile.objects.filter(sample__is_processed=True).aggregate(  # <-- SLOW
            Sum("size_in_bytes")
        )
        return total_size["size_in_bytes__sum"] if total_size["size_in_bytes__sum"] else 0

    @classmethod
    def _get_output_data_size(cls):
        """Total byte size of public computed files."""
        # NOTE(review): `s3_key__isnull=True` looks inverted — files that were
        # uploaded to S3 should have *both* bucket and key set. Confirm whether
        # this was meant to be `s3_key__isnull=False`.
        total_size = (
            ComputedFile.public_objects.all()
            .filter(s3_bucket__isnull=False, s3_key__isnull=True)
            .aggregate(Sum("size_in_bytes"))
        )
        return total_size["size_in_bytes__sum"] if total_size["size_in_bytes__sum"] else 0

    @classmethod
    def _get_job_stats(cls, jobs, range_param):
        """Aggregate totals, outcomes, average runtime and (optionally) a
        bucketed timeline for a job queryset."""
        start_filter = Q()
        if range_param:
            start_date = get_start_date(range_param)
            # Keep never-started jobs in scope so "pending" is still counted.
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(start_time__isnull=True)
        result = jobs.filter(start_filter).aggregate(
            total=Count("id"),
            successful=Count("id", filter=Q(success=True)),
            failed=Count("id", filter=Q(success=False)),
            pending=Count(
                "id",
                filter=Q(
                    start_time__isnull=True,
                    success__isnull=True,
                    created_at__gt=JOB_CREATED_AT_CUTOFF,
                ),
            ),
            open=Count(
                "id",
                filter=Q(
                    start_time__isnull=False,
                    success__isnull=True,
                    created_at__gt=JOB_CREATED_AT_CUTOFF,
                ),
            ),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result["average_time"] = (
            jobs.filter(start_filter)
            .filter(start_time__isnull=False, end_time__isnull=False, success=True)
            .aggregate(average_time=Avg(F("end_time") - F("start_time")))["average_time"]
        )
        if not result["average_time"]:
            result["average_time"] = 0
        else:
            # Expose the average as plain seconds rather than a timedelta.
            result["average_time"] = result["average_time"].total_seconds()
        if range_param:
            result["timeline"] = cls._get_intervals(jobs, range_param).annotate(
                total=Count("id"),
                successful=Count("id", filter=Q(success=True)),
                failed=Count("id", filter=Q(success=False)),
                pending=Count("id", filter=Q(start_time__isnull=True, success__isnull=True)),
                open=Count("id", filter=Q(start_time__isnull=False, success__isnull=True)),
            )
        return result

    @classmethod
    def _get_object_stats(cls, objects, range_param=False, field="created_at"):
        """Total count plus, when a range is given, a bucketed timeline."""
        result = {"total": objects.count()}
        if range_param:
            result["timeline"] = cls._get_intervals(objects, range_param, field).annotate(
                total=Count("id")
            )
        return result

    @classmethod
    def _get_intervals(cls, objects, range_param, field="last_modified"):
        """Bucket `objects` by the truncated `field`, limited to `range_param`."""
        range_to_trunc = {"day": "hour", "week": "day", "month": "day", "year": "month"}
        # truncate the parameterized field so it can be annotated by range
        # ie. each day is composed of 24 hours...
        start_trunc = Trunc(field, range_to_trunc.get(range_param), output_field=DateTimeField())
        # get the correct start time for the range
        start_range = get_start_date(range_param)
        # annotate and filter in a single query
        # ref https://stackoverflow.com/a/38359913/763705
        return objects.annotate(start=start_trunc).values("start").filter(start__gte=start_range)
###
# Transcriptome Indices
###
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="organism_name",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Organism name. Eg. `MUS_MUSCULUS`",
            ),
            openapi.Parameter(
                name="length",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Short hand for `index_type` Eg. `short` or `long`",
            ),
            openapi.Parameter(
                name="salmon_version",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Eg. `salmon 0.13.1`",
            ),
            openapi.Parameter(
                name="index_type",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Eg. `TRANSCRIPTOME_LONG`",
            ),
        ]
    ),
)
class TranscriptomeIndexList(generics.ListAPIView):
    """
    List all Transcriptome Indices. These are a special type of process result,
    necessary for processing other SRA samples.
    """

    serializer_class = OrganismIndexSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = ["salmon_version", "index_type"]
    ordering_fields = ("created_at", "salmon_version")
    ordering = ("-created_at",)

    def get_queryset(self):
        """Narrow the public indices by the optional `organism_name` and
        `length` query parameters."""
        indices = OrganismIndex.public_objects.all()

        organism_name = self.request.GET.get("organism_name", None)
        if organism_name is not None:
            indices = indices.filter(organism__name=organism_name.upper())

        length = self.request.GET.get("length", None)
        if length is not None:
            # `length` is shorthand for the full index_type value.
            indices = indices.filter(index_type="TRANSCRIPTOME_{}".format(length.upper()))

        return indices
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="id",
                in_=openapi.IN_PATH,
                type=openapi.TYPE_NUMBER,
                description="Transcriptome Index Id eg `1`",
            ),
        ]
    ),
)
class TranscriptomeIndexDetail(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """

    serializer_class = OrganismIndexSerializer
    # Looked up by primary key from the URL path parameter `id`.
    lookup_field = "id"
    queryset = OrganismIndex.public_objects.all()
###
# Compendia
###
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="latest_version",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="`True` will only return the highest `compendium_version` for each primary_organism.",
            ),
            openapi.Parameter(
                name="quant_sf_only",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="`True` for RNA-seq Sample Compendium results or `False` for quantile normalized.",
            ),
        ]
    ),
)
class CompendiumResultList(generics.ListAPIView):
    """
    List all CompendiaResults with filtering.
    """

    model = CompendiumResult
    queryset = CompendiumResult.objects.all()
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = ["primary_organism__name", "compendium_version", "quant_sf_only"]
    ordering_fields = ("primary_organism__name", "compendium_version", "id")
    ordering = ("primary_organism__name",)

    def get_queryset(self):
        """Return public compendium results; when `latest_version` is truthy,
        keep only the highest compendium_version per (primary_organism,
        quant_sf_only) pair."""
        public_result_queryset = CompendiumResult.objects.filter(result__is_public=True)

        # BUG FIX: query params are always strings, so the old truthiness
        # check treated `?latest_version=false` the same as `true`. Parse the
        # documented boolean explicitly.
        latest_version_param = self.request.query_params.get("latest_version", "")
        if str(latest_version_param).lower() in ("true", "1"):
            version_filter = Q(
                primary_organism=OuterRef("primary_organism"),
                quant_sf_only=OuterRef("quant_sf_only"),
            )
            # Subquery yielding the highest version for each outer row's pair.
            latest_versions = (
                public_result_queryset.filter(version_filter)
                .order_by("-compendium_version")
                .values("compendium_version")
            )
            return public_result_queryset.annotate(
                latest_version=Subquery(latest_versions[:1])
            ).filter(compendium_version=F("latest_version"))

        return public_result_queryset

    def get_serializer_class(self):
        """Requests with an activated API key get download URLs in the
        serialized output. (Same logic as CompendiumResultDetails.)"""
        try:
            token_id = self.request.META.get("HTTP_API_KEY", None)
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return CompendiumResultWithUrlSerializer
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return CompendiumResultSerializer
class CompendiumResultDetails(generics.RetrieveAPIView):
    """
    Get a specific Compendium Result
    """

    model = CompendiumResult
    queryset = CompendiumResult.objects.filter(is_public=True)
    lookup_field = "id"

    def get_serializer_class(self):
        """Requests carrying an activated API key may see download URLs."""
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            APIToken.objects.get(id=token_id, is_activated=True)
        except Exception:  # APIToken.DoesNotExist or ValidationError on a malformed id
            return CompendiumResultSerializer
        return CompendiumResultWithUrlSerializer
###
# QN Targets
###
class QNTargetsAvailable(generics.ListAPIView):
    """
    This is a list of all of the organisms which have available QN Targets
    """

    serializer_class = OrganismSerializer
    # Disable pagination: the organism list is small and returned whole.
    paginator = None

    def get_queryset(self):
        # Delegation to the model-layer helper keeps the QN-target logic
        # in one place.
        qn_ready_organisms = Organism.get_objects_with_qn_targets()
        return qn_ready_organisms
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="organism_name",
                in_=openapi.IN_PATH,
                type=openapi.TYPE_STRING,
                description="Eg `DANIO_RERIO`, `MUS_MUSCULUS`",
            )
        ],
        responses={404: "QN Target not found for the given organism."},
    ),
)
class QNTargetsDetail(generics.RetrieveAPIView):
    """
    Get a detailed view of the Quantile Normalization file for an organism.
    """

    serializer_class = QNTargetSerializer

    def get_object(self):
        """Resolve the newest QN-target computed file for the organism named
        in the URL, raising NotFound when none exists."""
        # Normalize e.g. "danio rerio" -> "DANIO_RERIO".
        organism_name = self.kwargs["organism_name"].upper().replace(" ", "_")
        try:
            organism_id = Organism.get_object_for_name(organism_name).id
            newest_annotation = (
                ComputationalResultAnnotation.objects.filter(
                    data__organism_id=organism_id, data__is_qn=True
                )
                .order_by("-created_at")
                .first()
            )
            qn_target = newest_annotation.result.computedfile_set.first()
        except Exception:
            # Unknown organism, no annotation, etc. — all map to 404.
            raise NotFound("Don't have a target for that organism!")
        if not qn_target:
            raise NotFound("Don't have a target for that organism!!")
        return qn_target
##
# Computed Files
##
class ComputedFilesList(generics.ListAPIView):
    """
    computed_files_list

    ComputedFiles are representation of files created by data-refinery processes.

    This can also be used to fetch all the compendia files we have generated with:
    ```
    GET /computed_files?is_compendia=True&is_public=True
    ```
    """

    queryset = ComputedFile.objects.all()
    serializer_class = ComputedFileListSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filterset_fields = (
        "id",
        "samples",
        "is_qn_target",
        "is_smashable",
        "is_qc",
        "is_compendia",
        "quant_sf_only",
        "svd_algorithm",
        "compendia_version",
        "created_at",
        "last_modified",
    )
    ordering_fields = (
        "id",
        "created_at",
        "last_modified",
        "compendia_version",
    )
    ordering = ("-id",)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        context = super(ComputedFilesList, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
        except Exception:  # APIToken.DoesNotExist or ValidationError on a malformed id
            return context
        # An activated token unlocks additional (download) fields.
        return {**context, "token": token}
class OriginalFileList(generics.ListAPIView):
    """
    original_files_list

    List Original Files that are associated with Samples. These are the files we proccess.
    """

    queryset = OriginalFile.objects.all()
    serializer_class = OriginalFileListSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    # Every serialized field is filterable; keep in sync with the serializer.
    filterset_fields = OriginalFileListSerializer.Meta.fields
    ordering_fields = (
        "id",
        "created_at",
        "last_modified",
    )
    ordering = ("-id",)
# error handlers
def handle404error(request, exception):
    """JSON 404 handler that nudges clients toward versioned API paths."""
    url = "https://api.refine.bio/"
    # check to see if the 404ed request contained a version
    if match(r"^/v[1-9]/.*", request.path):
        message = "The requested resource was not found on this server."
    else:
        message = "refine.bio API resources are only available through versioned requests."
    return JsonResponse({"message": message, "docs": url, "status_code": 404,}, status=404)
def handle500error(request):
    """JSON 500 handler; the error itself is reported elsewhere."""
    return JsonResponse(
        # Typo fix: "occured" -> "occurred" in the user-facing message.
        {"message": "A server error occurred. This has been reported.", "status_code": 500,},
        status=500,
    )
##
# Util
##
def get_client_ip(request):
    """Return the originating client IP for a request.

    Prefers the first address in the X-Forwarded-For header (set by
    proxies / load balancers); falls back to REMOTE_ADDR, or "" when
    neither is present.
    """
    forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
    if not forwarded_for:
        return request.META.get("REMOTE_ADDR", "")
    # X-Forwarded-For is "client, proxy1, proxy2, ..." — the client is first.
    return forwarded_for.split(",")[0]
<|code_end|>
common/data_refinery_common/models/models.py
<|code_start|>import hashlib
import io
import os
import shutil
import pytz
import uuid
import boto3
from botocore.client import Config
from datetime import datetime
from functools import partial
from typing import Dict, Set
from django.db.models import Count, Prefetch, DateTimeField
from django.db.models.expressions import F, Q
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import transaction
from django.db import models
from django.utils import timezone
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.organism import Organism
from data_refinery_common.utils import (
get_env_variable,
get_s3_url,
calculate_file_size,
calculate_sha1,
FileUtils,
)
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client("s3", config=Config(signature_version="s3v4"))
logger = get_and_configure_logger(__name__)
# Root of the shared data volume; overridable via the LOCAL_ROOT_DIR env var.
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = "salmon " + get_env_variable("SALMON_VERSION", "0.13.1")
CHUNK_SIZE = 1024 * 256  # chunk_size is in bytes
"""
# First Order Classes
This represent the primary data types we will be querying
and filtering against.
"""
class PublicObjectsManager(models.Manager):
    """Manager that restricts the default queryset to public rows
    (``is_public=True``)."""

    def get_queryset(self):
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_public=True)
class ProcessedObjectsManager(models.Manager):
    """Manager that restricts the default queryset to rows that are both
    processed and public (``is_processed=True, is_public=True``)."""

    def get_queryset(self):
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_processed=True, is_public=True)
class Sample(models.Model):
    """
    An individual sample.
    """

    class Meta:
        db_table = "samples"
        base_manager_name = "public_objects"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=["accession_code"]),
        ]

    def __str__(self):
        return self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_objects = ProcessedObjectsManager()

    # Identifiers
    accession_code = models.CharField(max_length=255, unique=True)
    title = models.CharField(max_length=255, unique=False, blank=True)

    # Relations
    organism = models.ForeignKey(Organism, blank=True, null=True, on_delete=models.SET_NULL)
    results = models.ManyToManyField("ComputationalResult", through="SampleResultAssociation")
    original_files = models.ManyToManyField("OriginalFile", through="OriginalFileSampleAssociation")
    computed_files = models.ManyToManyField("ComputedFile", through="SampleComputedFileAssociation")
    experiments = models.ManyToManyField("Experiment", through="ExperimentSampleAssociation")

    # Historical Properties
    source_database = models.CharField(max_length=255, blank=False)
    source_archive_url = models.CharField(max_length=255)
    source_filename = models.CharField(max_length=255, blank=False)
    source_absolute_file_path = models.CharField(max_length=255)
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Technological Properties
    platform_accession_code = models.CharField(max_length=256, blank=True)
    platform_name = models.CharField(max_length=256, blank=True)
    technology = models.CharField(max_length=256, blank=True)  # MICROARRAY, RNA-SEQ
    manufacturer = models.CharField(max_length=256, blank=True)
    protocol_info = JSONField(default=dict)

    # Scientific Properties
    sex = models.CharField(max_length=255, blank=True)
    age = models.DecimalField(max_length=255, blank=True, max_digits=8, decimal_places=3, null=True)
    specimen_part = models.CharField(max_length=255, blank=True)
    genotype = models.CharField(max_length=255, blank=True)
    disease = models.CharField(max_length=255, blank=True)
    disease_stage = models.CharField(max_length=255, blank=True)
    cell_line = models.CharField(max_length=255, blank=True)
    treatment = models.CharField(max_length=255, blank=True)
    race = models.CharField(max_length=255, blank=True)
    subject = models.CharField(max_length=255, blank=True)
    compound = models.CharField(max_length=255, blank=True)
    time = models.CharField(max_length=255, blank=True)

    # Crunch Properties
    is_processed = models.BooleanField(default=False)

    # Blacklisting
    is_blacklisted = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # created_at is only set once, on first save.
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(Sample, self).save(*args, **kwargs)

    def to_metadata_dict(self):
        """Render this Sample as a dict.

        Every key is prefixed with "refinebio_" to distinguish our fields
        from the source repositories' raw annotations.
        """
        metadata = {}
        metadata["refinebio_title"] = self.title
        metadata["refinebio_accession_code"] = self.accession_code
        metadata["refinebio_organism"] = self.organism.name if self.organism else None
        metadata["refinebio_source_database"] = self.source_database
        metadata["refinebio_source_archive_url"] = self.source_archive_url
        metadata["refinebio_sex"] = self.sex
        metadata["refinebio_age"] = self.age or ""
        metadata["refinebio_specimen_part"] = self.specimen_part
        metadata["refinebio_genetic_information"] = self.genotype
        metadata["refinebio_disease"] = self.disease
        metadata["refinebio_disease_stage"] = self.disease_stage
        metadata["refinebio_cell_line"] = self.cell_line
        metadata["refinebio_treatment"] = self.treatment
        metadata["refinebio_race"] = self.race
        metadata["refinebio_subject"] = self.subject
        metadata["refinebio_compound"] = self.compound
        metadata["refinebio_time"] = self.time
        metadata["refinebio_platform"] = self.pretty_platform
        metadata["refinebio_annotations"] = [
            data for data in self.sampleannotation_set.all().values_list("data", flat=True)
        ]
        return metadata

    # Returns a set of ProcessorJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_processor_jobs(self) -> Set:
        """All processor jobs reachable through this sample's original files."""
        processor_jobs = set()
        for original_file in self.original_files.prefetch_related("processor_jobs").all():
            for processor_job in original_file.processor_jobs.all():
                processor_jobs.add(processor_job)
        return processor_jobs

    # Returns a set of DownloaderJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_downloader_jobs(self) -> Set:
        """All downloader jobs reachable through this sample's original files."""
        downloader_jobs = set()
        for original_file in self.original_files.prefetch_related("downloader_jobs").all():
            for downloader_job in original_file.downloader_jobs.all():
                downloader_jobs.add(downloader_job)
        return downloader_jobs

    def get_result_files(self):
        """ Get all of the ComputedFile objects associated with this Sample """
        return self.computed_files.all()

    def get_most_recent_smashable_result_file(self):
        """ Get the most recent of the ComputedFile objects associated with this Sample """
        try:
            # `latest()` uses the Meta.get_latest_by field (created_at).
            latest_computed_file = self.computed_files.filter(
                is_public=True, is_smashable=True,
            ).latest()
            return latest_computed_file
        except ComputedFile.DoesNotExist as e:
            # This sample has no smashable files yet.
            return None

    def get_most_recent_quant_sf_file(self):
        """ Returns the latest quant.sf file that was generated for this sample.
        Note: We don't associate that file to the computed_files of this sample, that's
        why we have to go through the computational results. """
        return (
            ComputedFile.objects.filter(
                result__in=self.results.all(),
                filename="quant.sf",
                s3_key__isnull=False,
                s3_bucket__isnull=False,
            )
            .order_by("-created_at")
            .first()
        )

    @property
    def pretty_platform(self):
        """ Turns

        [HT_HG-U133_Plus_PM] Affymetrix HT HG-U133+ PM Array Plate

        into

        Affymetrix HT HG-U133+ PM Array Plate (hthgu133pluspm)
        """
        # Strip a leading "[ACCESSION]" prefix when present.
        if "]" in self.platform_name:
            platform_base = self.platform_name.split("]")[1].strip()
        else:
            platform_base = self.platform_name
        return platform_base + " (" + self.platform_accession_code + ")"
class SampleAnnotation(models.Model):
    """ Semi-standard information associated with a Sample """

    class Meta:
        db_table = "sample_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    # data: free-form JSON annotation payload.
    data = JSONField(default=dict)
    # is_ccdl: presumably marks annotations added by refine.bio itself
    # rather than the source repository — TODO confirm.
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # created_at is only set once, on first save.
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(SampleAnnotation, self).save(*args, **kwargs)
class ProcessedPublicObjectsManager(models.Manager):
    """
    Only returns Experiments that are is_public and have related is_processed Samples.
    """

    def get_queryset(self):
        base_queryset = super().get_queryset()
        # num_processed_samples is a cached counter maintained on Experiment.
        return base_queryset.filter(is_public=True, num_processed_samples__gt=0)
class Experiment(models.Model):
    """ An Experiment or Study """

    class Meta:
        db_table = "experiments"
        base_manager_name = "public_objects"

    def __str__(self):
        return "Experiment: " + self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()

    # Relations
    samples = models.ManyToManyField("Sample", through="ExperimentSampleAssociation")
    organisms = models.ManyToManyField("Organism", through="ExperimentOrganismAssociation")

    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)

    # Historical Properties
    source_database = models.CharField(max_length=32)  # "ArrayExpress, "SRA", "GEO"
    source_url = models.TextField()

    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)

    # Cached Computed Properties
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        if self.accession_code and not self.alternate_accession_code:
            # GEO and ArrayExpress mirror each other's accessions with
            # different prefixes; derive the alternate form automatically.
            if self.accession_code.startswith("GSE"):
                self.alternate_accession_code = "E-GEOD-" + self.accession_code[3:]
            elif self.accession_code.startswith("E-GEOD-"):
                self.alternate_accession_code = "GSE" + self.accession_code[7:]
        return super(Experiment, self).save(*args, **kwargs)

    def update_num_samples(self):
        """ Update our cache values """
        aggregates = self.samples.aggregate(
            num_total_samples=Count("id"),
            num_processed_samples=Count("id", filter=Q(is_processed=True)),
            num_downloadable_samples=Count(
                "id", filter=Q(is_processed=True, organism__qn_target__isnull=False)
            ),
        )
        self.num_total_samples = aggregates["num_total_samples"]
        self.num_processed_samples = aggregates["num_processed_samples"]
        self.num_downloadable_samples = aggregates["num_downloadable_samples"]
        self.save()

    def to_metadata_dict(self):
        """ Render this Experiment as a dict """
        metadata = {}
        metadata["title"] = self.title
        metadata["accession_code"] = self.accession_code
        metadata["organisms"] = list(self.organisms.all().values_list("name", flat=True))
        metadata["sample_accession_codes"] = list(
            self.samples.all().values_list("accession_code", flat=True)
        )
        metadata["description"] = self.description
        metadata["protocol_description"] = self.protocol_description
        metadata["technology"] = self.technology
        metadata["submitter_institution"] = self.submitter_institution
        metadata["has_publication"] = self.has_publication
        metadata["publication_title"] = self.publication_title
        metadata["publication_doi"] = self.publication_doi
        metadata["pubmed_id"] = self.pubmed_id
        if self.source_first_published:
            metadata["source_first_published"] = self.source_first_published.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_first_published"] = ""
        if self.source_last_modified:
            metadata["source_last_modified"] = self.source_last_modified.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_last_modified"] = ""
        return metadata

    def get_sample_metadata_fields(self):
        """ Get all metadata fields that are non-empty for at least one sample in the experiment.
        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        fields = []
        possible_fields = [
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
        ]
        samples = self.samples.all()
        for field in possible_fields:
            for sample in samples:
                value = getattr(sample, field)
                # Idiom fix: use `is not None` rather than `!= None` (PEP 8
                # E711); also avoids a second getattr call.
                if value is not None and value != "":
                    fields.append(field)
                    break
        return fields

    def update_sample_metadata_fields(self):
        self.sample_metadata_fields = self.get_sample_metadata_fields()

    def update_platform_names(self):
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()

    def get_sample_technologies(self):
        """ Get a list of unique technologies for all of the associated samples
        """
        return list(set([sample.technology for sample in self.samples.all()]))

    def get_platform_names(self):
        """ Get a list of unique platforms for all of the associated samples
        """
        return list(set([sample.platform_name for sample in self.samples.all()]))

    def get_platform_accession_codes(self):
        """ Get a list of unique platforms for all of the associated samples
        """
        return list(set([sample.platform_accession_code for sample in self.samples.all()]))

    @property
    def platforms(self):
        """ Returns a list of related pipelines """
        # Consistency fix: this duplicated get_platform_names(); delegate so
        # the two can never drift apart.
        return self.get_platform_names()

    @property
    def pretty_platforms(self):
        """ Returns a prettified list of related pipelines """
        return list(set([sample.pretty_platform for sample in self.samples.all()]))

    @property
    def processed_samples(self):
        """Accession codes of this experiment's processed samples."""
        # Idiom fix: plain truthiness instead of `== True`.
        return list(
            [sample.accession_code for sample in self.samples.all() if sample.is_processed]
        )

    @property
    def organism_names(self):
        """ Get a list of unique organism names that has at least one downloadable sample """
        result = (
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False)
            .values_list("organism__name", flat=True)
            .distinct()
        )
        return list(result)

    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.
        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False).values_list(
                "accession_code", flat=True
            )
        )
class ExperimentAnnotation(models.Model):
    """ Semi-standard information associated with an Experiment """

    class Meta:
        db_table = "experiment_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    # data: free-form JSON annotation payload.
    data = JSONField(default=dict)
    # is_ccdl: presumably marks annotations added by refine.bio itself
    # rather than the source repository — TODO confirm.
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        # created_at is only set once, on first save.
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ExperimentAnnotation, self).save(*args, **kwargs)
class Pipeline(models.Model):
    """Pipeline that is associated with a series of ComputationalResult records."""

    name = models.CharField(max_length=255)
    steps = ArrayField(models.IntegerField(), default=list)  # ordered step ids; exact semantics defined by callers -- confirm

    class Meta:
        db_table = "pipelines"
class Processor(models.Model):
    """Processor associated with a certain ComputationalResult.

    Uniquely identified by name + version + docker image + environment, so
    the same logical processor re-run under a new image/env is a new row.
    """

    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64)
    docker_image = models.CharField(max_length=255)
    environment = JSONField(default=dict)  # captured runtime environment details

    class Meta:
        db_table = "processors"
        unique_together = ("name", "version", "docker_image", "environment")

    def __str__(self):
        return "Processor: %s (version: %s, docker_image: %s)" % (
            self.name,
            self.version,
            self.docker_image,
        )
class ComputationalResult(models.Model):
    """Meta-information about the output of a computer process (e.g. Salmon)."""

    class Meta:
        db_table = "computational_results"
        base_manager_name = "public_objects"

    def __str__(self):
        processor_name_str = ""
        if self.processor:
            processor_name_str = ": " + str(self.processor.name)

        return "ComputationalResult " + str(self.pk) + processor_name_str

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # restricts queries to public records

    commands = ArrayField(models.TextField(), default=list)  # commands that produced this result
    processor = models.ForeignKey(Processor, blank=True, null=True, on_delete=models.CASCADE)

    samples = models.ManyToManyField("Sample", through="SampleResultAssociation")

    # The Organism Index used to process the sample.
    # SET_NULL so deleting an index doesn't cascade-delete its results.
    organism_index = models.ForeignKey(
        "OrganismIndex", blank=True, null=True, on_delete=models.SET_NULL
    )

    is_ccdl = models.BooleanField(default=True)

    # Stats
    time_start = models.DateTimeField(blank=True, null=True)
    time_end = models.DateTimeField(blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set created_at on first save and always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResult, self).save(*args, **kwargs)

    def remove_computed_files_from_s3(self):
        """Removes all associated computed files from S3.

        Use this before deleting a computational result.
        """
        for computed_file in self.computedfile_set.all():
            computed_file.delete_s3_file()
class ComputationalResultAnnotation(models.Model):
    """Non-standard (free-form JSON) information associated with a ComputationalResult."""

    class Meta:
        db_table = "computational_result_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # restricts queries to public records

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # Properties
    data = JSONField(default=dict)  # arbitrary annotation payload
    is_ccdl = models.BooleanField(default=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set created_at on first save and always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResultAnnotation, self).save(*args, **kwargs)
# Compendium Computational Result
class CompendiumResult(models.Model):
    """Computational Result for a Compendium.

    Ties a ComputationalResult to the organism(s) it covers, plus
    compendium-specific metadata (version, SVD algorithm, quant.sf-only flag).
    """

    class Meta:
        db_table = "compendium_results"
        base_manager_name = "public_objects"

    def __str__(self):
        return "CompendiumResult " + str(self.pk)

    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # restricts queries to public records

    # Relations
    result = models.ForeignKey(
        ComputationalResult,
        blank=False,
        null=False,
        related_name="compendium_result",
        on_delete=models.CASCADE,
    )
    primary_organism = models.ForeignKey(
        Organism,
        blank=False,
        null=False,
        related_name="primary_compendium_results",
        on_delete=models.CASCADE,
    )
    organisms = models.ManyToManyField(
        Organism, related_name="compendium_results", through="CompendiumResultOrganismAssociation"
    )

    # Properties
    quant_sf_only = models.BooleanField(default=False)  # True for quant.sf-only compendia
    compendium_version = models.IntegerField(blank=True, null=True)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="The SVD algorithm that was used to impute the compendium result.",
    )

    # Common Properties
    is_public = models.BooleanField(default=True)

    # helper
    def get_computed_file(self):
        """Short hand method for getting the computed file for this compendium."""
        return ComputedFile.objects.filter(result=self.result).first()
# TODO
# class Gene(models.Model):
""" A representation of a Gene """
# class Meta:
# db_table = "genes"
class OrganismIndex(models.Model):
    """A special type of process result, necessary for processing other SRA samples.

    Records which transcriptome index (organism + index type + Salmon version)
    a ComputationalResult corresponds to.
    """

    class Meta:
        db_table = "organism_index"
        base_manager_name = "public_objects"

    def __str__(self):
        return (
            "OrganismIndex "
            + str(self.pk)
            + ": "
            + self.organism.name
            + " ["
            + self.index_type
            + "] - "
            + str(self.salmon_version)
        )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()  # restricts queries to public records

    # Relations
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # ex., "TRANSCRIPTOME_LONG", "TRANSCRIPTOME_SHORT"
    index_type = models.CharField(max_length=255)

    # This corresponds to Ensembl's release number:
    # http://ensemblgenomes.org/info/about/release_cycle
    # Determined by hitting:
    # http://rest.ensembl.org/info/software?content-type=application/json
    source_version = models.CharField(max_length=255, default="93")

    # The name of the genome assembly used which corresponds to 'GRCh38' in:
    # ftp://ftp.ensembl.org/pub/release-93/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
    assembly_name = models.CharField(max_length=255, default="UNKNOWN")

    # This matters, for instance salmon 0.9.0 indexes don't work with 0.10.0
    salmon_version = models.CharField(max_length=255, default=CURRENT_SALMON_VERSION)

    # We keep the directory unextracted on the shared filesystem so all
    # Salmon jobs can access it.
    absolute_directory_path = models.CharField(max_length=255, blank=True, null=True, default="")

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def get_computed_file(self):
        """Short hand method for getting the computed file for this organism index."""
        return self.result.computedfile_set.first()

    def save(self, *args, **kwargs):
        """On save, set created_at on first save and always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(OrganismIndex, self).save(*args, **kwargs)
"""
# Files
These are the database representations of files
which live on local disk, on ephemeral storage,
or on AWS cloud services.
"""
class OriginalFile(models.Model):
    """A representation of a file from an external source.

    Tracks where the file came from (source_url/source_filename), where it
    currently lives (absolute_file_path locally, s3_bucket/s3_key on AWS),
    and the bookkeeping used to decide whether it still needs to be
    downloaded or processed.
    """

    class Meta:
        db_table = "original_files"
        indexes = [
            models.Index(fields=["filename"]),
            models.Index(fields=["source_filename"]),
        ]

    def __str__(self):
        return "OriginalFile: " + self.get_display_name()

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # File Properties
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    size_in_bytes = models.BigIntegerField(blank=True, null=True)
    sha1 = models.CharField(max_length=64)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Relations
    samples = models.ManyToManyField("Sample", through="OriginalFileSampleAssociation")
    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobOriginalFileAssociation"
    )
    downloader_jobs = models.ManyToManyField(
        "data_refinery_common.DownloaderJob", through="DownloaderJobOriginalFileAssociation"
    )

    # Historical Properties
    source_url = models.TextField()
    is_archive = models.BooleanField(default=True)
    source_filename = models.CharField(max_length=255, blank=False)

    # Scientific Properties
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Crunch Properties
    is_downloaded = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set created_at on first save and always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(OriginalFile, self).save(*args, **kwargs)

    def set_downloaded(self, absolute_file_path, filename=None):
        """Mark the file as downloaded and record its path, name, size and SHA1.

        If `filename` is not provided it will be parsed from
        `absolute_file_path`.
        """
        self.is_downloaded = True
        self.is_archive = FileUtils.is_archive(absolute_file_path)
        self.absolute_file_path = absolute_file_path
        self.filename = filename if filename else os.path.basename(absolute_file_path)
        self.calculate_size()
        self.calculate_sha1()
        self.save()

    def calculate_sha1(self) -> str:
        """Calculate, store, and return the SHA1 value of the file on disk."""
        # Annotation fixed: this method returns the SHA1, not None.
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> int:
        """Calculate, store, and return the number of bytes in the file on disk."""
        # Annotation fixed: this method returns the size, not None.
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def get_display_name(self):
        """For dev convenience: the filename, falling back to source_filename."""
        if not self.filename:
            return self.source_filename
        else:
            return self.filename

    def get_extension(self):
        """Returns the lowercased extension of the filename.

        Thanks to https://stackoverflow.com/a/541408/763705
        """
        return FileUtils.get_extension(self.filename)

    def is_blacklisted(self):
        """Return True for file extensions we never process."""
        return self.get_extension() in [".xml", ".chp", ".exp"]

    def delete_local_file(self):
        """Delete this file from the local file system and mark it not downloaded."""
        try:
            os.remove(self.absolute_file_path)
        except (OSError, TypeError):
            # Missing file, or absolute_file_path is None -- nothing to delete.
            pass
        except Exception:
            logger.exception(
                "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
            )
        self.is_downloaded = False
        self.save()

    def has_blocking_jobs(self, own_processor_id=None) -> bool:
        """Return True if an existing processor job should block reprocessing.

        A job blocks another processing attempt if it was marked no_retry
        (and not aborted) or if it hasn't finished yet. `own_processor_id`
        lets a running job exclude itself from the check.
        """
        # If the file has a processor job that should not have been
        # retried, then it still shouldn't be retried.
        # Exclude the ones that were aborted.
        no_retry_processor_jobs = self.processor_jobs.filter(no_retry=True).exclude(abort=True)

        # If the file has a processor job that hasn't even started
        # yet, then it doesn't need another.
        incomplete_processor_jobs = self.processor_jobs.filter(
            end_time__isnull=True, success__isnull=True, retried=False
        )

        if own_processor_id:
            incomplete_processor_jobs = incomplete_processor_jobs.exclude(id=own_processor_id)

        # Check if there's any jobs which should block another
        # processing attempt.
        blocking_jobs = no_retry_processor_jobs | incomplete_processor_jobs

        return blocking_jobs.first() is not None

    def needs_processing(self, own_processor_id=None) -> bool:
        """Returns False if original_file has been or is being processed.

        Returns True otherwise.

        If own_processor_id is supplied then it will be ignored so
        that processor jobs can use this function without their job
        being counted as currently processing this file.
        """
        sample = self.samples.first()
        if not sample:
            return True

        if self.has_blocking_jobs(own_processor_id):
            return False

        if sample.source_database == "SRA":
            computed_file = sample.get_most_recent_smashable_result_file()

            # If there's no smashable file then we should check the quant.sf file.
            if not computed_file:
                computed_file = sample.get_most_recent_quant_sf_file()

            # If there's neither a quant.sf file nor a smashable file
            # then we definitely need to process it.
            if not computed_file:
                return True

            # A file computed with an old version of salmon should be
            # rerun, so only a result that is on S3 AND was built with
            # the current salmon version counts as "already processed".
            if (
                computed_file.s3_bucket
                and computed_file.s3_key
                and computed_file.result.organism_index is not None
                and computed_file.result.organism_index.salmon_version == CURRENT_SALMON_VERSION
            ):
                return False
        else:
            # If this original_file has multiple samples (is an
            # archive), and any of them haven't been processed, we'll
            # need the entire archive in order to process any of them.
            # A check to not re-process the already processed samples
            # in the archive will happen elsewhere before dispatching.
            for sample in self.samples.all():
                if not sample.is_processed:
                    return True
                computed_file = sample.get_most_recent_smashable_result_file()
                if not computed_file:
                    return True
                if settings.RUNNING_IN_CLOUD and (
                    computed_file.s3_bucket is None or computed_file.s3_key is None
                ):
                    return True
            return False

        # If we aren't sure, prefer reprocessing over never processing.
        return True

    def needs_downloading(self, own_processor_id=None) -> bool:
        """Determine if a file needs to be downloaded.

        This is true if the file has already been downloaded and lost
        without getting processed.
        """
        # If the file is downloaded and the file actually exists on disk,
        # then it doesn't need to be downloaded.
        if self.absolute_file_path and os.path.exists(self.absolute_file_path):
            return False

        unstarted_downloader_jobs = self.downloader_jobs.filter(
            start_time__isnull=True, success__isnull=True, retried=False
        )

        # If the file has a downloader job that hasn't even started yet,
        # then it doesn't need another.
        if unstarted_downloader_jobs.count() > 0:
            return False

        # If this file has been processed, then it doesn't need to be downloaded again.
        return self.needs_processing(own_processor_id)

    def is_affy_data(self) -> bool:
        """Return true if original_file is a CEL file or a gzipped CEL file."""
        upper_name = self.source_filename.upper()
        # The length guards require at least one character before the
        # extension, preserving the original slice-based behavior.
        return (len(upper_name) > 4 and upper_name.endswith(".CEL")) or (
            len(upper_name) > 7 and upper_name.endswith(".CEL.GZ")
        )
class ComputedFile(models.Model):
    """A representation of a file created by a data-refinery process.

    Computed files belong to a ComputationalResult and may live locally
    (absolute_file_path), on S3 (s3_bucket/s3_key), or both.
    """

    class Meta:
        db_table = "computed_files"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=["filename"]),
        ]

    def __str__(self):
        return "ComputedFile: " + str(self.filename)

    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Object relations
    samples = models.ManyToManyField("Sample", through="SampleComputedFileAssociation")

    # File related
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    # TODO: make this work w/ migrations:
    # absolute_file_path = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField()
    sha1 = models.CharField(max_length=64)

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # Scientific
    is_smashable = models.BooleanField(default=False)
    is_qc = models.BooleanField(default=False)
    is_qn_target = models.BooleanField(default=False)

    # Compendia details
    quant_sf_only = models.BooleanField(default=False)
    is_compendia = models.BooleanField(default=False)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="The SVD algorithm that was used to generate the file.",
    )
    compendia_organism = models.ForeignKey(
        Organism, blank=True, null=True, on_delete=models.CASCADE
    )
    compendia_version = models.IntegerField(blank=True, null=True)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set created_at on first save and always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputedFile, self).save(*args, **kwargs)

    def sync_to_s3(self, s3_bucket=None, s3_key=None) -> bool:
        """Syncs a file to AWS S3.

        Returns True on success (or when not running in the cloud, where
        this is a no-op); returns False and clears the S3 location on
        failure.
        """
        if not settings.RUNNING_IN_CLOUD:
            return True

        self.s3_bucket = s3_bucket
        self.s3_key = s3_key

        try:
            S3.upload_file(
                self.absolute_file_path,
                s3_bucket,
                s3_key,
                ExtraArgs={"ACL": "public-read", "StorageClass": "STANDARD_IA"},
            )
            self.save()
        except Exception:
            logger.exception(
                "Error uploading computed file to S3",
                computed_file_id=self.pk,
                s3_key=self.s3_key,
                s3_bucket=self.s3_bucket,
            )
            self.s3_bucket = None
            self.s3_key = None
            return False
        return True

    def sync_from_s3(self, force=False, path=None):
        """Downloads a file from S3 to the local file system.

        Returns the absolute file path, or None if the file could not be
        obtained.
        """
        path = path if path is not None else self.absolute_file_path

        if not settings.RUNNING_IN_CLOUD and not force:
            if os.path.exists(path):
                return path
            else:
                # If the file doesn't exist at path and we're not
                # running in the cloud, then the file is almost
                # certainly at its absolute_file_path because it never got deleted.
                if os.path.exists(self.absolute_file_path):
                    shutil.copyfile(self.absolute_file_path, path)
                    return path
                else:
                    # We don't have the file :(
                    return None

        target_directory = os.path.dirname(path)
        os.makedirs(target_directory, exist_ok=True)

        if not self.s3_bucket or not self.s3_key:
            raise ValueError("Tried to download a computed file with no s3_bucket or s3_key")

        try:
            S3.download_file(self.s3_bucket, self.s3_key, path)

            # Verify sync integrity
            synced_sha1 = calculate_sha1(path)

            if self.sha1 != synced_sha1:
                raise AssertionError("SHA1 of downloaded ComputedFile doesn't match database SHA1!")

            return path
        except Exception as e:
            logger.exception(e, computed_file_id=self.pk)
            return None

    def change_s3_location(self, new_bucket: str, new_key: str) -> bool:
        """Moves the file from its current location in S3.

        The new location will be set based on `new_bucket` and
        `new_key`. The s3_bucket and s3_key properties will be updated
        to reflect this on a successful move.
        """
        old_bucket = self.s3_bucket
        old_key = self.s3_key
        copy_source = {"Bucket": old_bucket, "Key": old_key}
        try:
            # Bare `except:` replaced throughout: it also swallowed
            # KeyboardInterrupt/SystemExit.
            S3.copy_object(Bucket=new_bucket, CopySource=copy_source, Key=new_key)
        except Exception:
            logger.exception(
                "Could not copy computed file within S3",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        try:
            self.s3_bucket = new_bucket
            self.s3_key = new_key
            self.save()
        except Exception:
            logger.exception(
                "Could not save computed file after it was copied!!!",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        try:
            S3.delete_object(Bucket=old_bucket, Key=old_key)
        except Exception:
            logger.exception(
                "Could not delete computed file after it was copied and saved!!!",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        return True

    def calculate_sha1(self) -> str:
        """Calculate, store, and return the SHA1 value of the file on disk."""
        # Annotation fixed: this method returns the SHA1, not None.
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> int:
        """Calculate, store, and return the number of bytes in the file on disk."""
        # Annotation fixed: this method returns the size, not None.
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def delete_local_file(self, force=False):
        """Deletes a file from the path and actually removes it from the file system."""
        if not settings.RUNNING_IN_CLOUD and not force:
            return
        try:
            os.remove(self.absolute_file_path)
        except (OSError, TypeError):
            # Missing file, or absolute_file_path is None -- nothing to delete.
            pass
        except Exception:
            logger.exception(
                "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
            )

    def delete_s3_file(self, force=False):
        """Delete this file's S3 object and clear its recorded location.

        If we're not running in the cloud then we shouldn't try to
        delete something from S3 unless force is set.
        """
        if not settings.RUNNING_IN_CLOUD and not force:
            return False

        try:
            S3.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
        except Exception:
            logger.exception(
                "Failed to delete S3 object for Computed File.",
                computed_file=self.id,
                s3_object=self.s3_key,
            )
            return False

        self.s3_key = None
        self.s3_bucket = None
        self.save()
        return True

    def get_synced_file_path(self, force=False, path=None):
        """Fetches the absolute file path to this ComputedFile, fetching
        from S3 if it isn't already available locally."""
        if path:
            if os.path.exists(path):
                return path
            else:
                return self.sync_from_s3(force, path)
        else:
            if os.path.exists(self.absolute_file_path):
                return self.absolute_file_path
            else:
                return self.sync_from_s3(force)

    @property
    def s3_url(self):
        """Render the resulting HTTPS URL for the S3 object."""
        return self.get_s3_url()

    def get_s3_url(self):
        """Render the resulting HTTPS URL for the S3 object, or None if unset."""
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def download_url(self):
        """A temporary URL from which the file can be downloaded."""
        return self.create_download_url()

    def create_download_url(self):
        """Create a temporary presigned URL from which the file can be downloaded."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod="get_object",
                Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
                ExpiresIn=(60 * 60 * 7 * 24),  # 7 days in seconds.
            )
        else:
            return None

    def has_been_log2scaled(self):
        """Return true if this is a smashable file that has been log2 scaled."""
        return self.is_smashable and self.filename.endswith("lengthScaledTPM.tsv")
class Dataset(models.Model):
    """A Dataset is a desired set of experiments/samples to smash and download."""

    AGGREGATE_CHOICES = (("ALL", "All"), ("EXPERIMENT", "Experiment"), ("SPECIES", "Species"))

    SCALE_CHOICES = (
        ("NONE", "None"),
        ("MINMAX", "Minmax"),
        ("STANDARD", "Standard"),
        ("ROBUST", "Robust"),
    )

    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Experiments and samples live here: {'E-ABC-1': ['SAMP1', 'SAMP2']}
    # This isn't going to be queryable, so we can use JSON-in-text, just make
    # sure we validate properly in and out!
    data = JSONField(
        default=dict,
        help_text="This is a dictionary where the keys are experiment accession codes and the values are lists with sample accession codes. Eg: `{'E-ABC-1': ['SAMP1', 'SAMP2']}`",
    )

    # Processing properties
    aggregate_by = models.CharField(
        max_length=255,
        choices=AGGREGATE_CHOICES,
        default="EXPERIMENT",
        help_text="Specifies how samples are [aggregated](http://docs.refine.bio/en/latest/main_text.html#aggregations).",
    )
    scale_by = models.CharField(
        max_length=255,
        choices=SCALE_CHOICES,
        default="NONE",
        help_text="Specifies options for [transformations](http://docs.refine.bio/en/latest/main_text.html#transformations).",
    )
    quantile_normalize = models.BooleanField(
        default=True,
        help_text="Part of the advanced options. Allows [skipping quantile normalization](http://docs.refine.bio/en/latest/faq.html#what-does-it-mean-to-skip-quantile-normalization-for-rna-seq-samples) for RNA-Seq samples.",
    )
    quant_sf_only = models.BooleanField(
        default=False, help_text="Include only quant.sf files in the generated dataset."
    )
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="Specifies choice of SVD algorithm",
    )

    # State properties
    is_processing = models.BooleanField(default=False)  # Data is still editable when False
    is_processed = models.BooleanField(default=False)  # Result has been made
    is_available = models.BooleanField(default=False)  # Result is ready for delivery

    # Fixed for consistency with the model defined below:
    # ProcessorJobDatasetAssociation (Django resolves lazy model references
    # case-insensitively, but the spelling should match the class name).
    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobDatasetAssociation"
    )

    # Fail handling
    success = models.NullBooleanField(null=True)
    failure_reason = models.TextField()

    # Delivery properties
    email_address = models.CharField(max_length=255, blank=True, null=True)
    email_ccdl_ok = models.BooleanField(default=False)
    expires_on = models.DateTimeField(blank=True, null=True)

    # Deliverables
    s3_bucket = models.CharField(max_length=255)
    s3_key = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField(
        blank=True,
        null=True,
        default=0,
        help_text="Contains the size in bytes of the processed dataset.",
    )
    sha1 = models.CharField(max_length=64, null=True, default="")

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set created_at on first save and always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(Dataset, self).save(*args, **kwargs)

    def get_samples(self):
        """Returns all of the Sample objects in this Dataset."""
        # Use a set to deduplicate accessions; the original list
        # concatenation was O(n^2) for large datasets.
        unique_accessions = set()
        for sample_list in self.data.values():
            unique_accessions.update(sample_list)
        return Sample.objects.filter(accession_code__in=unique_accessions)

    def get_total_samples(self):
        """Returns the total number of samples, counting unique accession
        codes in `data`."""
        return len(
            {
                accession_code
                for experiment in self.data.values()
                for accession_code in experiment
            }
        )

    def get_experiments(self):
        """Returns all of the Experiment objects in this Dataset."""
        all_experiments = self.data.keys()
        return Experiment.objects.filter(accession_code__in=all_experiments)

    def get_samples_by_experiment(self):
        """Returns a dict of sample QuerySets, for samples grouped by experiment."""
        all_samples = {}

        for experiment, samples in self.data.items():
            all_samples[experiment] = Sample.objects.filter(accession_code__in=samples)

        return all_samples

    def get_samples_by_species(self):
        """Returns a dict of sample lists, for samples grouped by species."""
        by_species = {}
        all_samples = self.get_samples()
        for sample in all_samples:
            by_species.setdefault(sample.organism.name, []).append(sample)
        return by_species

    def get_aggregated_samples(self):
        """Uses aggregate_by to return a smasher-ready sample dict."""
        if self.aggregate_by == "ALL":
            return {"ALL": self.get_samples()}
        elif self.aggregate_by == "EXPERIMENT":
            return self.get_samples_by_experiment()
        else:
            return self.get_samples_by_species()

    def is_cross_technology(self):
        """Determine if this involves both Microarray + RNASeq."""
        return len(self.get_samples().values("technology").distinct()) > 1

    @property
    def download_url(self):
        """A temporary URL from which the file can be downloaded."""
        return self.create_download_url()

    def create_download_url(self):
        """Create a temporary presigned URL from which the file can be downloaded."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod="get_object",
                Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
                ExpiresIn=(60 * 60 * 7 * 24),  # 7 days in seconds.
            )
        else:
            return None

    def s3_url(self):
        """Render the resulting S3 URL, or None if the location is unset."""
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def has_email(self):
        """Returns whether the email address is set or not."""
        return bool(self.email_address)
class APIToken(models.Model):
    """Required for starting a smash job.

    Tokens must be activated (terms accepted) before use.
    """

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Activation
    is_activated = models.BooleanField(default=False)

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, set created_at on first save and always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(APIToken, self).save(*args, **kwargs)

    @property
    def terms_and_conditions(self):
        """The terms and conditions text users agree to when activating a token."""
        return settings.TERMS_AND_CONDITIONS
"""
# Associations
These represent the relationships between items in the other tables.
"""
class ExperimentSampleAssociation(models.Model):
    """Join table linking an Experiment to a Sample."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_sample_associations"
        unique_together = ("experiment", "sample")
class ExperimentOrganismAssociation(models.Model):
    """Join table linking an Experiment to an Organism."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_organism_associations"
        unique_together = ("experiment", "organism")
class DownloaderJobOriginalFileAssociation(models.Model):
    """Join table linking a DownloaderJob to an OriginalFile it downloads."""

    downloader_job = models.ForeignKey(
        "data_refinery_common.DownloaderJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "downloaderjob_originalfile_associations"
        unique_together = ("downloader_job", "original_file")
class ProcessorJobOriginalFileAssociation(models.Model):
    """Join table linking a ProcessorJob to an OriginalFile it processes."""

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "processorjob_originalfile_associations"
        unique_together = ("processor_job", "original_file")
class ProcessorJobDatasetAssociation(models.Model):
    """Join table linking a ProcessorJob (e.g. a smasher job) to a Dataset.

    NOTE: unlike the other association tables, this one has no
    unique_together constraint, so duplicate rows are possible.
    """

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    dataset = models.ForeignKey(Dataset, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_dataset_associations"
class OriginalFileSampleAssociation(models.Model):
    """Join table linking an OriginalFile to a Sample."""

    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "original_file_sample_associations"
        unique_together = ("original_file", "sample")
class SampleResultAssociation(models.Model):
    """Join table linking a Sample to a ComputationalResult."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_result_associations"
        unique_together = ("result", "sample")
class SampleComputedFileAssociation(models.Model):
    """Join table linking a Sample to a ComputedFile."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    computed_file = models.ForeignKey(
        ComputedFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_computed_file_associations"
        unique_together = ("sample", "computed_file")
class ExperimentResultAssociation(models.Model):
    """Join table linking an Experiment to a ComputationalResult."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "experiment_result_associations"
        unique_together = ("result", "experiment")
class CompendiumResultOrganismAssociation(models.Model):
    """Join table linking a CompendiumResult to an Organism it covers."""

    compendium_result = models.ForeignKey(
        CompendiumResult, blank=False, null=False, on_delete=models.CASCADE
    )
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "compendium_result_organism_associations"
        unique_together = ("compendium_result", "organism")
<|code_end|>
|
Processing info: a sample is shown as "not available" even though only tximport has been run on it
Ex:
https://www.refine.bio/experiments/SRP074593/gene-expression-profiling-of-drosophila-s2r-cells-following-rnai-mediated-knockdown-of-transcription-factors

We should just show the submitter-supplied info, since these samples aren't really processed through our pipeline.
| common/data_refinery_common/models/models.py
<|code_start|>import os
import shutil
import uuid
from typing import Set
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models, transaction
from django.db.models import Count, DateTimeField, Prefetch
from django.db.models.expressions import F, Q
from django.utils import timezone
import boto3
import pytz
from botocore.client import Config
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.organism import Organism
from data_refinery_common.utils import (
FileUtils,
calculate_file_size,
calculate_sha1,
get_env_variable,
get_s3_url,
)
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client("s3", config=Config(signature_version="s3v4"))

logger = get_and_configure_logger(__name__)

# Root of the local data store shared by worker processes.
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")

# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = "salmon " + get_env_variable("SALMON_VERSION", "0.13.1")

CHUNK_SIZE = 1024 * 256  # chunk_size is in bytes
"""
# First Order Classes
This represent the primary data types we will be querying
and filtering against.
"""
class PublicObjectsManager(models.Manager):
    """Manager whose querysets contain only rows flagged is_public."""

    def get_queryset(self):
        # Narrow the default queryset down to publicly visible records.
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_public=True)
class ProcessedObjectsManager(models.Manager):
    """Manager whose querysets contain only rows that are both processed and public."""

    def get_queryset(self):
        # Restrict the default queryset to processed, public records.
        default_queryset = super().get_queryset()
        return default_queryset.filter(is_processed=True, is_public=True)
class Sample(models.Model):
    """
    An individual sample.

    A sample belongs to one or more Experiments, is backed by one or more
    OriginalFiles, and accumulates ComputationalResults / ComputedFiles as it
    moves through the processing pipeline.
    """

    class Meta:
        db_table = "samples"
        base_manager_name = "public_objects"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=["accession_code"]),
        ]

    def __str__(self):
        return self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_objects = ProcessedObjectsManager()

    # Identifiers
    accession_code = models.CharField(max_length=255, unique=True)
    title = models.CharField(max_length=255, unique=False, blank=True)

    # Relations
    organism = models.ForeignKey(Organism, blank=True, null=True, on_delete=models.SET_NULL)
    results = models.ManyToManyField("ComputationalResult", through="SampleResultAssociation")
    original_files = models.ManyToManyField("OriginalFile", through="OriginalFileSampleAssociation")
    computed_files = models.ManyToManyField("ComputedFile", through="SampleComputedFileAssociation")
    experiments = models.ManyToManyField("Experiment", through="ExperimentSampleAssociation")

    # Historical Properties
    source_database = models.CharField(max_length=255, blank=False)
    source_archive_url = models.CharField(max_length=255)
    source_filename = models.CharField(max_length=255, blank=False)
    source_absolute_file_path = models.CharField(max_length=255)
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Technological Properties
    platform_accession_code = models.CharField(max_length=256, blank=True)
    platform_name = models.CharField(max_length=256, blank=True)
    technology = models.CharField(max_length=256, blank=True)  # MICROARRAY, RNA-SEQ
    manufacturer = models.CharField(max_length=256, blank=True)
    protocol_info = JSONField(default=dict)

    # Scientific Properties
    sex = models.CharField(max_length=255, blank=True)
    age = models.DecimalField(max_length=255, blank=True, max_digits=8, decimal_places=3, null=True)
    specimen_part = models.CharField(max_length=255, blank=True)
    genotype = models.CharField(max_length=255, blank=True)
    disease = models.CharField(max_length=255, blank=True)
    disease_stage = models.CharField(max_length=255, blank=True)
    cell_line = models.CharField(max_length=255, blank=True)
    treatment = models.CharField(max_length=255, blank=True)
    race = models.CharField(max_length=255, blank=True)
    subject = models.CharField(max_length=255, blank=True)
    compound = models.CharField(max_length=255, blank=True)
    time = models.CharField(max_length=255, blank=True)

    # Crunch Properties
    is_processed = models.BooleanField(default=False)

    # Blacklisting
    is_blacklisted = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(Sample, self).save(*args, **kwargs)

    def to_metadata_dict(self):
        """Render this Sample as a dict of refinebio_* metadata fields."""
        metadata = {}
        metadata["refinebio_title"] = self.title
        metadata["refinebio_accession_code"] = self.accession_code
        metadata["refinebio_organism"] = self.organism.name if self.organism else None
        metadata["refinebio_source_database"] = self.source_database
        metadata["refinebio_source_archive_url"] = self.source_archive_url
        metadata["refinebio_sex"] = self.sex
        metadata["refinebio_age"] = self.age or ""
        metadata["refinebio_specimen_part"] = self.specimen_part
        metadata["refinebio_genetic_information"] = self.genotype
        metadata["refinebio_disease"] = self.disease
        metadata["refinebio_disease_stage"] = self.disease_stage
        metadata["refinebio_cell_line"] = self.cell_line
        metadata["refinebio_treatment"] = self.treatment
        metadata["refinebio_race"] = self.race
        metadata["refinebio_subject"] = self.subject
        metadata["refinebio_compound"] = self.compound
        metadata["refinebio_time"] = self.time
        metadata["refinebio_platform"] = self.pretty_platform
        metadata["refinebio_annotations"] = [
            data for data in self.sampleannotation_set.all().values_list("data", flat=True)
        ]
        return metadata

    # Returns a set of ProcessorJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_processor_jobs(self) -> Set:
        """Collect every ProcessorJob reachable through this sample's original files."""
        return {
            processor_job
            for original_file in self.original_files.prefetch_related("processor_jobs").all()
            for processor_job in original_file.processor_jobs.all()
        }

    # Returns a set of DownloaderJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_downloader_jobs(self) -> Set:
        """Collect every DownloaderJob reachable through this sample's original files."""
        return {
            downloader_job
            for original_file in self.original_files.prefetch_related("downloader_jobs").all()
            for downloader_job in original_file.downloader_jobs.all()
        }

    def get_result_files(self):
        """ Get all of the ComputedFile objects associated with this Sample """
        return self.computed_files.all()

    def get_most_recent_smashable_result_file(self):
        """ Get the most recent of the ComputedFile objects associated with this Sample """
        try:
            return self.computed_files.filter(
                is_public=True, is_smashable=True,
            ).latest()
        except ComputedFile.DoesNotExist:
            # This sample has no smashable files yet.
            return None

    def get_most_recent_quant_sf_file(self):
        """ Returns the latest quant.sf file that was generated for this sample.

        Note: We don't associate that file to the computed_files of this sample, that's
        why we have to go through the computational results. """
        return (
            ComputedFile.objects.filter(
                result__in=self.results.all(),
                filename="quant.sf",
                s3_key__isnull=False,
                s3_bucket__isnull=False,
            )
            .order_by("-created_at")
            .first()
        )

    @property
    def pretty_platform(self):
        """ Turns

        [HT_HG-U133_Plus_PM] Affymetrix HT HG-U133+ PM Array Plate

        into

        Affymetrix HT HG-U133+ PM Array Plate (hthgu133pluspm)
        """
        if "]" in self.platform_name:
            platform_base = self.platform_name.split("]")[1].strip()
        else:
            platform_base = self.platform_name
        return platform_base + " (" + self.platform_accession_code + ")"
class SampleAnnotation(models.Model):
    """ Semi-standard information associated with a Sample """

    class Meta:
        db_table = "sample_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    data = JSONField(default=dict)  # Free-form annotation payload.
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(SampleAnnotation, self).save(*args, **kwargs)
class ProcessedPublicObjectsManager(models.Manager):
    """
    Only returns Experiments that are is_public and have related is_processed Samples.
    """

    def get_queryset(self):
        # Public experiments with at least one processed sample (cached counter).
        queryset = super().get_queryset()
        return queryset.filter(is_public=True, num_processed_samples__gt=0)
class Experiment(models.Model):
    """ An Experiment or Study """

    class Meta:
        db_table = "experiments"
        base_manager_name = "public_objects"

    def __str__(self):
        return "Experiment: " + self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()

    # Relations
    samples = models.ManyToManyField("Sample", through="ExperimentSampleAssociation")
    organisms = models.ManyToManyField("Organism", through="ExperimentOrganismAssociation")

    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)

    # Historical Properties
    source_database = models.CharField(max_length=32)  # "ArrayExpress, "SRA", "GEO"
    source_url = models.TextField()

    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)

    # Cached Computed Properties
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time

        # Keep the GEO / ArrayExpress accession pair in sync so an experiment
        # can be looked up by either identifier.
        if self.accession_code and not self.alternate_accession_code:
            if self.accession_code.startswith("GSE"):
                self.alternate_accession_code = "E-GEOD-" + self.accession_code[3:]
            elif self.accession_code.startswith("E-GEOD-"):
                self.alternate_accession_code = "GSE" + self.accession_code[7:]

        return super(Experiment, self).save(*args, **kwargs)

    def update_num_samples(self):
        """ Update our cache values """
        aggregates = self.samples.aggregate(
            num_total_samples=Count("id"),
            num_processed_samples=Count("id", filter=Q(is_processed=True)),
            num_downloadable_samples=Count(
                "id", filter=Q(is_processed=True, organism__qn_target__isnull=False)
            ),
        )
        self.num_total_samples = aggregates["num_total_samples"]
        self.num_processed_samples = aggregates["num_processed_samples"]
        self.num_downloadable_samples = aggregates["num_downloadable_samples"]
        self.save()

    def to_metadata_dict(self):
        """ Render this Experiment as a dict """
        metadata = {}
        metadata["title"] = self.title
        metadata["accession_code"] = self.accession_code
        metadata["organisms"] = list(self.organisms.all().values_list("name", flat=True))
        metadata["sample_accession_codes"] = list(
            self.samples.all().values_list("accession_code", flat=True)
        )
        metadata["description"] = self.description
        metadata["protocol_description"] = self.protocol_description
        metadata["technology"] = self.technology
        metadata["submitter_institution"] = self.submitter_institution
        metadata["has_publication"] = self.has_publication
        metadata["publication_title"] = self.publication_title
        metadata["publication_doi"] = self.publication_doi
        metadata["pubmed_id"] = self.pubmed_id
        if self.source_first_published:
            metadata["source_first_published"] = self.source_first_published.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_first_published"] = ""
        if self.source_last_modified:
            metadata["source_last_modified"] = self.source_last_modified.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_last_modified"] = ""
        return metadata

    def get_sample_metadata_fields(self):
        """ Get all metadata fields that are non-empty for at least one sample in the experiment.

        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        fields = []

        possible_fields = [
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
        ]
        samples = self.samples.all()
        for field in possible_fields:
            for sample in samples:
                value = getattr(sample, field)
                # Treat both NULL and the empty string as "not provided".
                if value is not None and value != "":
                    fields.append(field)
                    break

        return fields

    def update_sample_metadata_fields(self):
        """Refresh the cached sample_metadata_fields array (does not save)."""
        self.sample_metadata_fields = self.get_sample_metadata_fields()

    def update_platform_names(self):
        """Refresh the cached platform name/accession arrays (does not save)."""
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()

    def get_sample_technologies(self):
        """ Get a list of unique technologies for all of the associated samples """
        return list(set([sample.technology for sample in self.samples.all()]))

    def get_platform_names(self):
        """ Get a list of unique platforms for all of the associated samples """
        return list(set([sample.platform_name for sample in self.samples.all()]))

    def get_platform_accession_codes(self):
        """ Get a list of unique platforms for all of the associated samples """
        return list(set([sample.platform_accession_code for sample in self.samples.all()]))

    @property
    def platforms(self):
        """ Returns a list of related pipelines """
        # Identical computation to get_platform_names; delegate to keep them in sync.
        return self.get_platform_names()

    @property
    def pretty_platforms(self):
        """ Returns a prettified list of related pipelines """
        return list(set([sample.pretty_platform for sample in self.samples.all()]))

    @property
    def processed_samples(self):
        """Accession codes of samples that have been processed."""
        return [sample.accession_code for sample in self.samples.all() if sample.is_processed]

    @property
    def organism_names(self):
        """ Get a list of unique organism names that has at least one downloadable sample """
        result = (
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False)
            .values_list("organism__name", flat=True)
            .distinct()
        )
        return list(result)

    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.

        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False).values_list(
                "accession_code", flat=True
            )
        )
class ExperimentAnnotation(models.Model):
    """ Semi-standard information associated with an Experiment """

    class Meta:
        db_table = "experiment_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    data = JSONField(default=dict)  # Free-form annotation payload.
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ExperimentAnnotation, self).save(*args, **kwargs)
class Pipeline(models.Model):
    """Pipeline that is associated with a series of ComputationalResult records."""

    # Human-readable pipeline name.
    name = models.CharField(max_length=255)
    # Integer identifiers of the steps that make up this pipeline
    # (presumably processor enum values -- confirm against pipeline code).
    steps = ArrayField(models.IntegerField(), default=list)

    class Meta:
        db_table = "pipelines"
class Processor(models.Model):
    """Processor associated with a certain ComputationalResult."""

    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64)
    docker_image = models.CharField(max_length=255)
    # JSON snapshot of the processor's environment (contents recorded by the
    # dispatching code -- confirm the exact schema there).
    environment = JSONField(default=dict)

    class Meta:
        db_table = "processors"
        # A processor's identity is the full combination of these fields.
        unique_together = ("name", "version", "docker_image", "environment")

    def __str__(self):
        return "Processor: %s (version: %s, docker_image: %s)" % (
            self.name,
            self.version,
            self.docker_image,
        )
class ComputationalResult(models.Model):
    """ Meta-information about the output of a computer process. (Ex Salmon) """

    class Meta:
        db_table = "computational_results"
        base_manager_name = "public_objects"

    def __str__(self):
        processor_name_str = ""
        if self.processor:
            processor_name_str = ": " + str(self.processor.name)

        return "ComputationalResult " + str(self.pk) + processor_name_str

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Command lines recorded for this result.
    commands = ArrayField(models.TextField(), default=list)
    processor = models.ForeignKey(Processor, blank=True, null=True, on_delete=models.CASCADE)
    samples = models.ManyToManyField("Sample", through="SampleResultAssociation")

    # The Organism Index used to process the sample.
    organism_index = models.ForeignKey(
        "OrganismIndex", blank=True, null=True, on_delete=models.SET_NULL
    )

    is_ccdl = models.BooleanField(default=True)

    # Stats
    time_start = models.DateTimeField(blank=True, null=True)
    time_end = models.DateTimeField(blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResult, self).save(*args, **kwargs)

    def remove_computed_files_from_s3(self):
        """ Removes all associated computed files from S3. Use this before deleting a computational result. """
        for computed_file in self.computedfile_set.all():
            computed_file.delete_s3_file()
class ComputationalResultAnnotation(models.Model):
    """ Non-standard information associated with an ComputationalResult """

    class Meta:
        db_table = "computational_result_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # Properties
    data = JSONField(default=dict)  # Free-form annotation payload.
    is_ccdl = models.BooleanField(default=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResultAnnotation, self).save(*args, **kwargs)
# Compendium Computational Result
class CompendiumResult(models.Model):
    """ Computational Result For A Compendium """

    class Meta:
        db_table = "compendium_results"
        base_manager_name = "public_objects"

    def __str__(self):
        return "CompendiumResult " + str(self.pk)

    # Algorithms that may have been used for SVD imputation.
    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    result = models.ForeignKey(
        ComputationalResult,
        blank=False,
        null=False,
        related_name="compendium_result",
        on_delete=models.CASCADE,
    )
    primary_organism = models.ForeignKey(
        Organism,
        blank=False,
        null=False,
        related_name="primary_compendium_results",
        on_delete=models.CASCADE,
    )
    organisms = models.ManyToManyField(
        Organism, related_name="compendium_results", through="CompendiumResultOrganismAssociation"
    )

    # Properties
    quant_sf_only = models.BooleanField(default=False)
    compendium_version = models.IntegerField(blank=True, null=True)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="The SVD algorithm that was used to impute the compendium result.",
    )

    # Common Properties
    is_public = models.BooleanField(default=True)

    # helper
    def get_computed_file(self):
        """ Short hand method for getting the computed file for this compendium"""
        return ComputedFile.objects.filter(result=self.result).first()
# TODO
# class Gene(models.Model):
""" A representation of a Gene """
# class Meta:
# db_table = "genes"
class OrganismIndex(models.Model):
    """ A special type of process result, necessary for processing other SRA samples """

    class Meta:
        db_table = "organism_index"
        base_manager_name = "public_objects"

    def __str__(self):
        return (
            "OrganismIndex "
            + str(self.pk)
            + ": "
            + self.organism.name
            + " ["
            + self.index_type
            + "] - "
            + str(self.salmon_version)
        )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # ex., "TRANSCRIPTOME_LONG", "TRANSCRIPTOME_SHORT"
    index_type = models.CharField(max_length=255)

    # This corresponds to Ensembl's release number:
    # http://ensemblgenomes.org/info/about/release_cycle
    # Determined by hitting:
    # http://rest.ensembl.org/info/software?content-type=application/json
    source_version = models.CharField(max_length=255, default="93")

    # The name of the genome assembly used which corresponds to 'GRCh38' in:
    # ftp://ftp.ensembl.org/pub/release-93/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
    assembly_name = models.CharField(max_length=255, default="UNKNOWN")

    # This matters, for instance salmon 0.9.0 indexes don't work with 0.10.0
    salmon_version = models.CharField(max_length=255, default=CURRENT_SALMON_VERSION)

    # We keep the directory unextracted on the shared filesystem so all
    # Salmon jobs can access it.
    absolute_directory_path = models.CharField(max_length=255, blank=True, null=True, default="")

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def get_computed_file(self):
        """ Short hand method for getting the computed file for this organism index"""
        return self.result.computedfile_set.first()

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(OrganismIndex, self).save(*args, **kwargs)
"""
# Files
These are the database representations of files
which live on local disk, on ephemeral storage,
or on AWS cloud services.
"""
class OriginalFile(models.Model):
    """ A representation of a file from an external source """

    class Meta:
        db_table = "original_files"
        indexes = [
            models.Index(fields=["filename"]),
            models.Index(fields=["source_filename"]),
        ]

    def __str__(self):
        return "OriginalFile: " + self.get_display_name()

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # File Properties
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    size_in_bytes = models.BigIntegerField(blank=True, null=True)
    sha1 = models.CharField(max_length=64)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Relations
    samples = models.ManyToManyField("Sample", through="OriginalFileSampleAssociation")
    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobOriginalFileAssociation"
    )
    downloader_jobs = models.ManyToManyField(
        "data_refinery_common.DownloaderJob", through="DownloaderJobOriginalFileAssociation"
    )

    # Historical Properties
    source_url = models.TextField()
    is_archive = models.BooleanField(default=True)
    source_filename = models.CharField(max_length=255, blank=False)

    # Scientific Properties
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Crunch Properties
    is_downloaded = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(OriginalFile, self).save(*args, **kwargs)

    def set_downloaded(self, absolute_file_path, filename=None):
        """ Marks the file as downloaded, if `filename` is not provided it will
        be parsed from the `absolute_file_path` """
        self.is_downloaded = True
        self.is_archive = FileUtils.is_archive(absolute_file_path)
        self.absolute_file_path = absolute_file_path
        self.filename = filename if filename else os.path.basename(absolute_file_path)
        self.calculate_size()
        self.calculate_sha1()
        self.save()

    def calculate_sha1(self) -> None:
        """ Calculate the SHA1 value of a given file.
        """
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> None:
        """ Calculate the number of bytes in a given file.
        """
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def get_display_name(self):
        """ For dev convenience """
        if not self.filename:
            return self.source_filename
        else:
            return self.filename

    def get_extension(self):
        """ Returns the lowercased extension of the filename
        Thanks to https://stackoverflow.com/a/541408/763705 """
        return FileUtils.get_extension(self.filename)

    def is_blacklisted(self):
        # Metadata-style file extensions that we never want to process.
        return self.get_extension() in [".xml", ".chp", ".exp"]

    def delete_local_file(self):
        """ Deletes this file from the local file system."""
        try:
            os.remove(self.absolute_file_path)
        except OSError:
            # The file was already gone.
            pass
        except TypeError:
            # absolute_file_path was None.
            pass
        except Exception:
            logger.exception(
                "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
            )
        self.is_downloaded = False
        self.save()

    def has_blocking_jobs(self, own_processor_id=None) -> bool:
        """Return True if an existing processor job should prevent queuing
        another processing attempt for this file."""
        # If the file has a processor job that should not have been
        # retried, then it still shouldn't be retried.
        # Exclude the ones that were aborted.
        no_retry_processor_jobs = self.processor_jobs.filter(no_retry=True).exclude(abort=True)

        # If the file has a processor job that hasn't even started
        # yet, then it doesn't need another.
        incomplete_processor_jobs = self.processor_jobs.filter(
            end_time__isnull=True, success__isnull=True, retried=False
        )

        if own_processor_id:
            incomplete_processor_jobs = incomplete_processor_jobs.exclude(id=own_processor_id)

        # Check if there's any jobs which should block another
        # processing attempt.
        blocking_jobs = no_retry_processor_jobs | incomplete_processor_jobs

        return blocking_jobs.first() is not None

    def needs_processing(self, own_processor_id=None) -> bool:
        """Returns False if original_file has been or is being processed.

        Returns True otherwise.

        If own_processor_id is supplied then it will be ignored so
        that processor jobs can use this function without their job
        being counted as currently processing this file.
        """
        sample = self.samples.first()
        if not sample:
            return True

        if self.has_blocking_jobs(own_processor_id):
            return False

        if sample.source_database == "SRA":
            computed_file = sample.get_most_recent_smashable_result_file()

            # If there's no smashable file then we should check the quant.sf file.
            if not computed_file:
                computed_file = sample.get_most_recent_quant_sf_file()

            # If there's neither a quant.sf file nor a smashable file
            # then we definitely need to process it.
            if not computed_file:
                return True

            if (
                computed_file.s3_bucket
                and computed_file.s3_key
                and computed_file.result.organism_index is not None
                and computed_file.result.organism_index.salmon_version == CURRENT_SALMON_VERSION
            ):
                # If the file wasn't computed with the latest
                # version of salmon, then it should be rerun
                # with the latest version of salmon.
                return False
        else:
            # If this original_file has multiple samples (is an
            # archive), and any of them haven't been processed, we'll
            # need the entire archive in order to process any of them.
            # A check to not re-process the already processed
            # samples in the archive will happen elsewhere before
            # dispatching.
            for sample in self.samples.all():
                if not sample.is_processed:
                    return True
                computed_file = sample.get_most_recent_smashable_result_file()
                if not computed_file:
                    return True
                if settings.RUNNING_IN_CLOUD and (
                    computed_file.s3_bucket is None or computed_file.s3_key is None
                ):
                    return True
            return False

        # If we aren't sure, prefer reprocessing over never processing.
        return True

    def needs_downloading(self, own_processor_id=None) -> bool:
        """Determine if a file needs to be downloaded.

        This is true if the file has already been downloaded and lost
        without getting processed.
        """
        # If the file is downloaded and the file actually exists on disk,
        # then it doesn't need to be downloaded.
        if self.absolute_file_path and os.path.exists(self.absolute_file_path):
            return False

        unstarted_downloader_jobs = self.downloader_jobs.filter(
            start_time__isnull=True, success__isnull=True, retried=False
        )

        # If the file has a downloader job that hasn't even started yet,
        # then it doesn't need another.
        if unstarted_downloader_jobs.count() > 0:
            return False

        # If this file has been processed, then it doesn't need to be downloaded again.
        return self.needs_processing(own_processor_id)

    def is_affy_data(self) -> bool:
        """Return true if original_file is a CEL file or a gzipped CEL file.
        """
        upper_name = self.source_filename.upper()
        return (len(upper_name) > 4 and upper_name[-4:] == ".CEL") or (
            len(upper_name) > 7 and upper_name[-7:] == ".CEL.GZ"
        )
class ComputedFile(models.Model):
    """A representation of a file created by a data-refinery process.

    Tracks both the local path and the S3 location of the file, plus
    flags describing how the file may be used (smashing, QC, QN
    targets, compendia).
    """

    class Meta:
        db_table = "computed_files"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=["filename"]),
        ]

    def __str__(self):
        return "ComputedFile: " + str(self.filename)

    # Valid values for the `svd_algorithm` field below.
    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Object relations
    samples = models.ManyToManyField("Sample", through="SampleComputedFileAssociation")

    # File related
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    # TODO: make this work w/ migrations:
    # absolute_file_path = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField()
    sha1 = models.CharField(max_length=64)

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # Scientific
    is_smashable = models.BooleanField(default=False)
    is_qc = models.BooleanField(default=False)
    is_qn_target = models.BooleanField(default=False)

    # Compendia details
    quant_sf_only = models.BooleanField(default=False)
    is_compendia = models.BooleanField(default=False)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="The SVD algorithm that was used to generate the file.",
    )
    compendia_organism = models.ForeignKey(
        Organism, blank=True, null=True, on_delete=models.CASCADE
    )
    compendia_version = models.IntegerField(blank=True, null=True)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps."""
        current_time = timezone.now()
        if not self.id:
            # Only stamp created_at on first save; last_modified every time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputedFile, self).save(*args, **kwargs)

    def sync_to_s3(self, s3_bucket=None, s3_key=None) -> bool:
        """Syncs a file to AWS S3.

        Returns True on success (or trivially when not running in the
        cloud), False if the upload failed.
        """
        if not settings.RUNNING_IN_CLOUD:
            return True

        self.s3_bucket = s3_bucket
        self.s3_key = s3_key

        try:
            S3.upload_file(
                self.absolute_file_path,
                s3_bucket,
                s3_key,
                ExtraArgs={"ACL": "public-read", "StorageClass": "STANDARD_IA"},
            )
            # Persist the new bucket/key only after a successful upload.
            self.save()
        except Exception:
            logger.exception(
                "Error uploading computed file to S3",
                computed_file_id=self.pk,
                s3_key=self.s3_key,
                s3_bucket=self.s3_bucket,
            )
            # NOTE(review): the cleared bucket/key are only reset in memory
            # here, not saved back to the database.
            self.s3_bucket = None
            self.s3_key = None
            return False

        return True

    def sync_from_s3(self, force=False, path=None):
        """Downloads a file from S3 to the local file system.

        Returns the absolute file path on success, or None when the
        file could not be made available locally.
        """
        path = path if path is not None else self.absolute_file_path

        if not settings.RUNNING_IN_CLOUD and not force:
            if os.path.exists(path):
                return path
            else:
                # If the file doesn't exist at path and we're not
                # running in the cloud, then the file is almost
                # certainly at its absolute_file_path because it never got deleted.
                if os.path.exists(self.absolute_file_path):
                    shutil.copyfile(self.absolute_file_path, path)
                    return path
                else:
                    # We don't have the file :(
                    return None

        target_directory = os.path.dirname(path)
        os.makedirs(target_directory, exist_ok=True)

        if not self.s3_bucket or not self.s3_key:
            raise ValueError("Tried to download a computed file with no s3_bucket or s3_key")

        try:
            S3.download_file(self.s3_bucket, self.s3_key, path)

            # Verify sync integrity against the stored checksum.
            synced_sha1 = calculate_sha1(path)

            if self.sha1 != synced_sha1:
                raise AssertionError("SHA1 of downloaded ComputedFile doesn't match database SHA1!")

            return path
        except Exception as e:
            logger.exception(e, computed_file_id=self.pk)
            return None

    def change_s3_location(self, new_bucket: str, new_key: str) -> bool:
        """Moves the file from its current location in S3.

        The new location will be set based on `new_bucket` and
        `new_key`. The s3_bucket and s3_key properties will be updated
        to reflect this on a successful move. Returns False if any of
        the copy / save / delete steps fails.
        """
        old_bucket = self.s3_bucket
        old_key = self.s3_key
        copy_source = {"Bucket": old_bucket, "Key": old_key}
        try:
            # Copy first; the old object is only deleted after the copy
            # and the database update both succeed.
            # NOTE(review): bare `except:` also catches KeyboardInterrupt
            # and SystemExit — consider narrowing to `except Exception:`.
            response = S3.copy_object(Bucket=new_bucket, CopySource=copy_source, Key=new_key)
        except:
            logger.exception(
                "Could not copy computed file within S3",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        try:
            self.s3_bucket = new_bucket
            self.s3_key = new_key
            self.save()
        except:
            logger.exception(
                "Could not save computed file after it was copied!!!",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        try:
            response = S3.delete_object(Bucket=old_bucket, Key=old_key)
        except:
            logger.exception(
                "Could not delete computed file after it was copied and saved!!!",
                computed_file_id=self.id,
                source_bucket=old_bucket,
                source_key=old_key,
                destination_bucket=new_bucket,
                destination_key=new_key,
            )
            return False

        return True

    def calculate_sha1(self) -> str:
        """Calculate, store, and return the SHA1 of this file on disk.

        Delegates to the module-level calculate_sha1 helper.
        """
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> int:
        """Calculate, store, and return the size in bytes of this file on disk."""
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def delete_local_file(self, force=False):
        """Deletes a file from the path and actually removes it from the file system.

        Missing files (OSError) and unset paths (TypeError) are ignored.
        """
        if not settings.RUNNING_IN_CLOUD and not force:
            return
        try:
            os.remove(self.absolute_file_path)
        except OSError:
            pass
        except TypeError:
            pass
        except Exception:
            logger.exception(
                "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
            )

    def delete_s3_file(self, force=False):
        """Delete this file's S3 object and clear its bucket/key fields.

        Returns True on success, False otherwise.
        """
        # If we're not running in the cloud then we shouldn't try to
        # delete something from S3 unless force is set.
        if not settings.RUNNING_IN_CLOUD and not force:
            return False

        try:
            S3.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
        except:
            logger.exception(
                "Failed to delete S3 object for Computed File.",
                computed_file=self.id,
                s3_object=self.s3_key,
            )
            return False

        self.s3_key = None
        self.s3_bucket = None
        self.save()
        return True

    def get_synced_file_path(self, force=False, path=None):
        """Fetches the absolute file path to this ComputedFile, fetching
        from S3 if it isn't already available locally."""
        if path:
            if os.path.exists(path):
                return path
            else:
                return self.sync_from_s3(force, path)
        else:
            if os.path.exists(self.absolute_file_path):
                return self.absolute_file_path
            else:
                return self.sync_from_s3(force)

    @property
    def s3_url(self):
        """Render the resulting HTTPS URL for the S3 object."""
        return self.get_s3_url()

    def get_s3_url(self):
        """Render the resulting HTTPS URL for the S3 object, or None if
        the file has no S3 location."""
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def download_url(self):
        """A temporary URL from which the file can be downloaded."""
        return self.create_download_url()

    def create_download_url(self):
        """Create a temporary pre-signed URL from which the file can be
        downloaded, or None outside the cloud / without an S3 location."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod="get_object",
                Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
                ExpiresIn=(60 * 60 * 7 * 24),  # 7 days in seconds.
            )
        else:
            return None

    def has_been_log2scaled(self):
        """Return true if this is a smashable file that has been log2 scaled."""
        return self.is_smashable and self.filename.endswith("lengthScaledTPM.tsv")
class Dataset(models.Model):
    """A Dataset is a desired set of experiments/samples to smash and download."""

    # Valid values for aggregate_by / scale_by / svd_algorithm.
    AGGREGATE_CHOICES = (("ALL", "All"), ("EXPERIMENT", "Experiment"), ("SPECIES", "Species"))

    SCALE_CHOICES = (
        ("NONE", "None"),
        ("MINMAX", "Minmax"),
        ("STANDARD", "Standard"),
        ("ROBUST", "Robust"),
    )

    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Experiments and samples live here: {'E-ABC-1': ['SAMP1', 'SAMP2']}
    # This isn't going to be queryable, so we can use JSON-in-text, just make
    # sure we validate properly in and out!
    data = JSONField(
        default=dict,
        help_text="This is a dictionary where the keys are experiment accession codes and the values are lists with sample accession codes. Eg: `{'E-ABC-1': ['SAMP1', 'SAMP2']}`",
    )

    # Processing properties
    aggregate_by = models.CharField(
        max_length=255,
        choices=AGGREGATE_CHOICES,
        default="EXPERIMENT",
        help_text="Specifies how samples are [aggregated](http://docs.refine.bio/en/latest/main_text.html#aggregations).",
    )
    scale_by = models.CharField(
        max_length=255,
        choices=SCALE_CHOICES,
        default="NONE",
        help_text="Specifies options for [transformations](http://docs.refine.bio/en/latest/main_text.html#transformations).",
    )
    quantile_normalize = models.BooleanField(
        default=True,
        help_text="Part of the advanced options. Allows [skipping quantile normalization](http://docs.refine.bio/en/latest/faq.html#what-does-it-mean-to-skip-quantile-normalization-for-rna-seq-samples) for RNA-Seq samples.",
    )
    quant_sf_only = models.BooleanField(
        default=False, help_text="Include only quant.sf files in the generated dataset."
    )
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="Specifies choice of SVD algorithm",
    )

    # State properties
    is_processing = models.BooleanField(default=False)  # Data is still editable when False
    is_processed = models.BooleanField(default=False)  # Result has been made
    is_available = models.BooleanField(default=False)  # Result is ready for delivery

    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobDataSetAssociation"
    )

    # Fail handling
    success = models.NullBooleanField(null=True)
    failure_reason = models.TextField()

    # Delivery properties
    email_address = models.CharField(max_length=255, blank=True, null=True)
    email_ccdl_ok = models.BooleanField(default=False)
    expires_on = models.DateTimeField(blank=True, null=True)

    # Deliverables
    s3_bucket = models.CharField(max_length=255)
    s3_key = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField(
        blank=True,
        null=True,
        default=0,
        help_text="Contains the size in bytes of the processed dataset.",
    )
    sha1 = models.CharField(max_length=64, null=True, default="")

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps."""
        current_time = timezone.now()
        if not self.id:
            # Only stamp created_at on first save; last_modified every time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(Dataset, self).save(*args, **kwargs)

    def get_samples(self):
        """Returns all of the Sample objects in this Dataset."""
        all_samples = []
        for sample_list in self.data.values():
            all_samples = all_samples + sample_list
        # De-duplicate accession codes shared between experiments.
        all_samples = list(set(all_samples))
        return Sample.objects.filter(accession_code__in=all_samples)

    def get_total_samples(self):
        """Returns the total number of samples, this counts the number of unique
        accession codes in `data`."""
        return len(
            set(
                [
                    accession_code
                    for experiment in self.data.values()
                    for accession_code in experiment
                ]
            )
        )

    def get_experiments(self):
        """Returns all of the Experiment objects in this Dataset."""
        all_experiments = self.data.keys()
        return Experiment.objects.filter(accession_code__in=all_experiments)

    def get_samples_by_experiment(self):
        """Returns a dict of sample QuerySets, for samples grouped by experiment."""
        all_samples = {}

        for experiment, samples in self.data.items():
            all_samples[experiment] = Sample.objects.filter(accession_code__in=samples)

        return all_samples

    def get_samples_by_species(self):
        """Returns a dict of Sample lists, grouped by organism name."""
        by_species = {}
        all_samples = self.get_samples()
        for sample in all_samples:
            if not by_species.get(sample.organism.name, None):
                by_species[sample.organism.name] = [sample]
            else:
                by_species[sample.organism.name].append(sample)

        return by_species

    def get_aggregated_samples(self):
        """Uses aggregate_by to return a smasher-ready sample dict."""
        if self.aggregate_by == "ALL":
            return {"ALL": self.get_samples()}
        elif self.aggregate_by == "EXPERIMENT":
            return self.get_samples_by_experiment()
        else:
            return self.get_samples_by_species()

    def is_cross_technology(self):
        """Determine if this involves both Microarray + RNASeq."""
        if len(self.get_samples().values("technology").distinct()) > 1:
            return True
        else:
            return False

    @property
    def download_url(self):
        """A temporary URL from which the file can be downloaded."""
        return self.create_download_url()

    def create_download_url(self):
        """Create a temporary pre-signed URL from which the file can be
        downloaded, or None outside the cloud / without an S3 location."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod="get_object",
                Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
                ExpiresIn=(60 * 60 * 7 * 24),  # 7 days in seconds.
            )
        else:
            return None

    def s3_url(self):
        """Render the resulting S3 URL, or None when no S3 location is set."""
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def has_email(self):
        """Returns whether an email address is set."""
        return bool(self.email_address)
class APIToken(models.Model):
    """Required for starting a smash job."""

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Activation
    is_activated = models.BooleanField(default=False)

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps."""
        current_time = timezone.now()
        if not self.id:
            # Only stamp created_at on first save; last_modified every time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(APIToken, self).save(*args, **kwargs)

    @property
    def terms_and_conditions(self):
        """The terms-and-conditions text configured in Django settings."""
        return settings.TERMS_AND_CONDITIONS
"""
# Associations
These represent the relationships between items in the other tables.
"""
class ExperimentSampleAssociation(models.Model):
    """Join table linking an Experiment to one of its Samples."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_sample_associations"
        unique_together = ("experiment", "sample")
class ExperimentOrganismAssociation(models.Model):
    """Join table linking an Experiment to an Organism it contains."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_organism_associations"
        unique_together = ("experiment", "organism")
class DownloaderJobOriginalFileAssociation(models.Model):
    """Join table linking a DownloaderJob to an OriginalFile it downloads."""

    downloader_job = models.ForeignKey(
        "data_refinery_common.DownloaderJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "downloaderjob_originalfile_associations"
        unique_together = ("downloader_job", "original_file")
class ProcessorJobOriginalFileAssociation(models.Model):
    """Join table linking a ProcessorJob to an OriginalFile it processes."""

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "processorjob_originalfile_associations"
        unique_together = ("processor_job", "original_file")
class ProcessorJobDatasetAssociation(models.Model):
    """Join table linking a ProcessorJob (e.g. a smasher job) to a Dataset."""

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    dataset = models.ForeignKey(Dataset, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_dataset_associations"
        # NOTE(review): unlike the sibling association tables, no
        # unique_together constraint here — confirm duplicate rows are intended.
class OriginalFileSampleAssociation(models.Model):
    """Join table linking an OriginalFile to a Sample it belongs to."""

    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "original_file_sample_associations"
        unique_together = ("original_file", "sample")
class SampleResultAssociation(models.Model):
    """Join table linking a Sample to a ComputationalResult produced for it."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_result_associations"
        unique_together = ("result", "sample")
class SampleComputedFileAssociation(models.Model):
    """Join table linking a Sample to a ComputedFile derived from it."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    computed_file = models.ForeignKey(
        ComputedFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_computed_file_associations"
        unique_together = ("sample", "computed_file")
class ExperimentResultAssociation(models.Model):
    """Join table linking an Experiment to a ComputationalResult."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "experiment_result_associations"
        unique_together = ("result", "experiment")
class CompendiumResultOrganismAssociation(models.Model):
    """Join table linking a CompendiumResult to an Organism it covers."""

    compendium_result = models.ForeignKey(
        CompendiumResult, blank=False, null=False, on_delete=models.CASCADE
    )
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "compendium_result_organism_associations"
        unique_together = ("compendium_result", "organism")
<|code_end|>
common/data_refinery_common/rna_seq.py
<|code_start|>from typing import List
from django.db.models import OuterRef, Subquery
from data_refinery_common.job_lookup import ProcessorEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputationalResult, ComputedFile, Experiment
from data_refinery_common.utils import get_env_variable
logger = get_and_configure_logger(__name__)
# Some experiments won't be entirely processed, but we'd still like to
# make the samples we can process available. This means we need to run
# tximport on the experiment before 100% of the samples are processed
# individually.
# This idea has been discussed here: https://github.com/AlexsLemonade/refinebio/issues/909
# The consensus is that this is a good idea, but that we need a cutoff
# to determine which experiments have enough data to have tximport run
# on them early. Candace ran an experiment to find these cutoff
# values and recorded the results of this experiment here:
# https://github.com/AlexsLemonade/tximport_partial_run_tests/pull/3
# The gist of that discussion/experiment is that we need two cutoff
# values, one for a minimum size experiment that can be processed
# early and the percentage of completion necessary before we can
# run tximport on the experiment. The values we decided on are:
# Minimum number of eligible samples an experiment must have before an
# early (partial) tximport run is considered.
EARLY_TXIMPORT_MIN_SIZE = 25
# Minimum fraction of eligible samples that must already be quantified
# before an early tximport run is allowed.
EARLY_TXIMPORT_MIN_PERCENT = 0.80
def should_run_tximport(experiment: Experiment, results, is_tximport_job: bool):
    """Decide whether `experiment` is eligible to have tximport run on it.

    `results` is a queryset of ComputationalResults for the samples that
    had salmon quant run on them.
    """
    quantified_count = results.count()
    if quantified_count == 0:
        return False

    # Tximport requires that all samples were processed with the same
    # salmon version (https://github.com/AlexsLemonade/refinebio/issues/1496).
    salmon_version_count = (
        results.filter(organism_index__salmon_version__isnull=False)
        .values_list("organism_index__salmon_version")
        .distinct()
        .count()
    )
    if salmon_version_count > 1:
        return False

    eligible_count = experiment.samples.filter(
        source_database="SRA", technology="RNA-SEQ"
    ).count()
    if eligible_count == 0:
        return False

    completion = quantified_count / eligible_count
    if completion == 1.0:
        # Fully quantified experiments qualify regardless of their size.
        return True

    # Partially quantified experiments only qualify during tximport jobs,
    # and only when large and complete enough (module-level cutoffs).
    return (
        is_tximport_job
        and eligible_count >= EARLY_TXIMPORT_MIN_SIZE
        and completion >= EARLY_TXIMPORT_MIN_PERCENT
    )
def get_quant_results_for_experiment(experiment: Experiment, filter_old_versions=True):
    """Returns the newest salmon quant ComputationalResult for each sample
    of `experiment`, as a QuerySet.

    When `filter_old_versions` is True, only results produced by the
    currently configured salmon version are considered.
    """
    # Subquery to calculate quant results
    # https://docs.djangoproject.com/en/2.2/ref/models/expressions/#subquery-expressions

    # Salmon version gets saved as what salmon outputs, which includes this prefix.
    current_salmon_version = "salmon " + get_env_variable("SALMON_VERSION", "0.13.1")
    if filter_old_versions:
        eligible_results = ComputationalResult.objects.prefetch_related("organism_index").filter(
            organism_index__salmon_version=current_salmon_version
        )
    else:
        eligible_results = ComputationalResult.objects.all()

    # A result is only eligible to be used if it actually got uploaded.
    eligible_results = eligible_results.select_related("computedfile").filter(
        computedfile__s3_bucket__isnull=False, computedfile__s3_key__isnull=False
    )

    # Calculate the computational results sorted that are associated with a given sample (
    # referenced from the top query)
    newest_computational_results = eligible_results.filter(
        samples=OuterRef("id"), processor__name=ProcessorEnum.SALMON_QUANT.value["name"],
    ).order_by("-created_at")

    # Annotate each sample in the experiment with the id of the most recent computational result
    computational_results_ids = (
        experiment.samples.all()
        .annotate(
            latest_computational_result_id=Subquery(newest_computational_results.values("id")[:1])
        )
        .filter(latest_computational_result_id__isnull=False)
        .values_list("latest_computational_result_id", flat=True)
    )

    # return the computational results that match those ids
    return ComputationalResult.objects.all().filter(id__in=computational_results_ids)
def get_quant_files_for_results(results: List[ComputationalResult]):
    """Returns the uploaded `quant.sf` ComputedFile for each result in `results`.

    Raises the underlying exception (after logging) if any result is
    missing an uploaded quant.sf file.
    """
    quant_files = []

    for result in results:
        try:
            quant_files.append(
                ComputedFile.objects.filter(
                    result=result,
                    filename="quant.sf",
                    s3_key__isnull=False,
                    s3_bucket__isnull=False,
                ).order_by("-id")[0]
            )
        except Exception as e:
            # Best-effort lookup of the related sample for log context.
            try:
                sample = result.samples.first()
            except Exception:
                sample = None

            # BUGFIX: this handler previously logged `experiment=experiment.id`
            # even though no `experiment` name exists in this function, raising
            # a NameError that masked the real error; it also dereferenced
            # `sample.id` when sample could be None.
            logger.exception(
                "Salmon quant result found without quant.sf ComputedFile!",
                quant_result=result.id,
                sample=sample.id if sample else None,
            )
            raise e

    return quant_files
ENA_DOWNLOAD_URL_TEMPLATE = (
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/{short_accession}{sub_dir}"
"/{long_accession}/{long_accession}{read_suffix}.fastq.gz"
)
ENA_SUB_DIR_PREFIX = "/00"
def _build_ena_file_url(run_accession: str, read_suffix=""):
# ENA has a weird way of nesting data: if the run accession is
# greater than 9 characters long then there is an extra
# sub-directory in the path which is "00" + the last digit of
# the run accession.
sub_dir = ""
if len(run_accession) > 9:
sub_dir = ENA_SUB_DIR_PREFIX + run_accession[-1]
return ENA_DOWNLOAD_URL_TEMPLATE.format(
short_accession=run_accession[:6],
sub_dir=sub_dir,
long_accession=run_accession,
read_suffix=read_suffix,
)
<|code_end|>
workers/data_refinery_workers/processors/salmon.py
<|code_start|>import json
import os
import re
import shutil
import subprocess
import tarfile
from typing import Dict, List
from django.conf import settings
from django.db import transaction
from django.utils import timezone
import boto3
import numpy as np
import pandas as pd
import untangle
from botocore.client import Config
from data_refinery_common.job_lookup import Downloaders, PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Experiment,
ExperimentSampleAssociation,
OrganismIndex,
Pipeline,
Processor,
Sample,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.rna_seq import (
get_quant_files_for_results,
get_quant_results_for_experiment,
should_run_tximport,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client("s3", config=Config(signature_version="s3v4"))
logger = get_and_configure_logger(__name__)

# Name prefix for each processor job's scratch directory.
JOB_DIR_PREFIX = "processor_job_"
# Root of the shared data volume; both overridable via the environment.
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
def _set_job_prefix(job_context: Dict) -> Dict:
    """Record this job's scratch-directory name prefix in the context.

    Sets the `job_dir_prefix` key and returns the updated context.
    """
    job_context["job_dir_prefix"] = "{}{}".format(JOB_DIR_PREFIX, job_context["job_id"])
    return job_context
def _prepare_files(job_context: Dict) -> Dict:
    """Moves the file(s) from the raw directory to the temp directory.

    Also adds the keys "input_file_path" and "output_directory" to
    job_context so everything is prepared for processing. If the reads
    are paired then there will also be an "input_file_path_2" key
    added to job_context for the second read. On any validation failure
    the job's failure_reason is set and "success" is False.
    """
    logger.debug("Preparing files..")

    # Create a directory specific to this processor job combo.
    # (A single sample could belong to multiple experiments, meaning
    # that it could be run more than once, potentially even at the
    # same time.)
    job_context["work_dir"] = os.path.join(LOCAL_ROOT_DIR, job_context["job_dir_prefix"]) + "/"
    os.makedirs(job_context["work_dir"], exist_ok=True)

    original_files = job_context["original_files"]
    job_context["input_file_path"] = original_files[0].absolute_file_path
    if not os.path.exists(job_context["input_file_path"]):
        logger.error(
            "Was told to process a non-existent file - why did this happen?",
            input_file_path=job_context["input_file_path"],
            processor_job=job_context["job_id"],
        )
        job_context["job"].failure_reason = "Missing input file: " + str(
            job_context["input_file_path"]
        )
        job_context["success"] = False
        return job_context

    # Two original files means paired-end reads.
    if len(original_files) == 2:
        job_context["input_file_path_2"] = original_files[1].absolute_file_path
        if not os.path.exists(job_context["input_file_path_2"]):
            logger.error(
                "Was told to process a non-existent file2 - why did this happen?",
                input_file_path=job_context["input_file_path_2"],
                processor_job=job_context["job_id"],
            )
            job_context["job"].failure_reason = "Missing input file2: " + str(
                job_context["input_file_path_2"]
            )
            job_context["success"] = False
            return job_context

    # There should only ever be one per Salmon run
    sample = job_context["original_files"][0].samples.first()

    # This check was added to ensure that we don't process any RNA-Seq
    # samples from GEO, but for the time being we really don't want to
    # run salmon on anything that's not from SRA. See
    # https://github.com/AlexsLemonade/refinebio/issues/966 for more
    # information.
    if sample.technology != "RNA-SEQ" or sample.source_database != "SRA":
        failure_reason = (
            "The sample for this job either was not RNA-Seq or was not from the " "SRA database."
        )
        job_context["failure_reason"] = failure_reason
        logger.error(failure_reason, sample=sample, processor_job=job_context["job_id"])
        # No need to retry and fail more than once for this reason.
        job_context["success"] = False
        job_context["job"].failure_reason = failure_reason
        job_context["job"].no_retry = True
        return job_context

    # Detect that this is an SRA file from the source URL
    if ("ncbi.nlm.nih.gov" in job_context["original_files"][0].source_url) or (
        job_context["input_file_path"][-4:].upper() == ".SRA"
    ):
        # Copy SRA inputs into the work dir; downstream steps key off
        # the "sra_input_file_path" context entry.
        new_input_file_path = os.path.join(job_context["work_dir"], original_files[0].filename)
        shutil.copyfile(job_context["input_file_path"], new_input_file_path)
        job_context["input_file_path"] = new_input_file_path
        job_context["sra_input_file_path"] = new_input_file_path

        if job_context.get("input_file_path_2", False):
            new_input_file_path = os.path.join(job_context["work_dir"], original_files[1].filename)
            shutil.copyfile(job_context["input_file_path_2"], new_input_file_path)
            job_context["input_file_path_2"] = new_input_file_path

    job_context["sample_accession_code"] = sample.accession_code
    job_context["sample"] = sample
    job_context["samples"] = []  # This will only be populated in the `tximport` job
    job_context["organism"] = job_context["sample"].organism
    job_context["success"] = True

    # Per-job output locations for salmon and salmontools artifacts.
    job_context["output_directory"] = job_context["work_dir"] + sample.accession_code + "_output/"
    os.makedirs(job_context["output_directory"], exist_ok=True)
    job_context["salmontools_directory"] = job_context["work_dir"] + "salmontools/"
    os.makedirs(job_context["salmontools_directory"], exist_ok=True)
    job_context["salmontools_archive"] = job_context["work_dir"] + "salmontools-result.tar.gz"

    # Timestamped archive name (seconds precision) for the result tarball.
    timestamp = str(timezone.now().timestamp()).split(".")[0]
    job_context["output_archive"] = job_context["work_dir"] + "result-" + timestamp + ".tar.gz"

    job_context["computed_files"] = []
    job_context["smashable_files"] = []
    return job_context
def _determine_index_length_sra(job_context: Dict) -> Dict:
    """
    Use the sra-stat tool to determine length
    ex:
    sra-stat -x --statistics ERR1562482.sra

    Sets "index_length_raw", "index_length" ("long"/"short"), and
    "sra_num_reads" (1 or 2) in job_context. If the read layout cannot
    be determined the job is marked failed.
    """
    command_str = "sra-stat -x --statistics {sra_file}"
    formatted_command = command_str.format(sra_file=job_context["sra_input_file_path"])
    logger.debug(
        "Running sra-stat using the following shell command: %s",
        formatted_command,
        processor_job=job_context["job_id"],
    )
    completed_command = subprocess.run(
        formatted_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )

    respo = completed_command.stdout.decode().strip()
    try:
        stats = untangle.parse(respo)
    except ValueError:
        # NOTE: `stats` stays unbound here; the NameError in the parsing
        # attempts below is swallowed by their broad excepts, so we fall
        # through to the -1 / "short" default.
        logger.error(
            "Unable to parse sra-stat output!", respo=str(respo), command=formatted_command
        )

    # Different SRA files can create different output formats, somehow.
    # This mess tries every output method we can to parse these stats.
    # If it's so messed up we don't know, default to short.
    try:
        bases_count = int(stats.Run.Bases["count"])
        reads_count = int(stats.Run.Statistics["nspots"]) * int(stats.Run.Statistics["nreads"])
        job_context["sra_num_reads"] = int(stats.Run.Statistics["nreads"])
        job_context["index_length_raw"] = int(bases_count / reads_count)
    except Exception:
        try:
            job_context["sra_num_reads"] = int(stats.Run.Statistics["nreads"])
            spot_count_mates = int(stats.Run["spot_count_mates"])
            base_count_bio_mates = int(stats.Run["base_count_bio_mates"])
            reads_count = spot_count_mates * int(stats.Run.Statistics["nreads"])
            job_context["index_length_raw"] = int(base_count_bio_mates / reads_count)
        except Exception:
            try:
                job_context["index_length_raw"] = int(stats.Run.Statistics.Read[0]["average"])
            except Exception:
                # sra-stat will sometimes put warnings in the XML stream, so we end up with nothing valid to parse.
                # https://github.com/ncbi/sra-tools/issues/192
                logger.error(
                    "Unable to determine index length! Defaulting to small",
                    file=job_context["sra_input_file_path"],
                )
                job_context["index_length_raw"] = -1

    # Reads longer than 75bp use the long transcriptome index.
    if job_context["index_length_raw"] > 75:
        job_context["index_length"] = "long"
    else:
        job_context["index_length"] = "short"

    if not job_context.get("sra_num_reads", None):
        # Fall back to experiment annotations to learn the library layout.
        try:
            sample = job_context["sample"]
            for exp in sample.experiments.all():
                for annotation in exp.experimentannotation_set.all():
                    if annotation.data.get("library_layout", "").upper() == "PAIRED":
                        job_context["sra_num_reads"] = 2
                        return job_context
                    if annotation.data.get("library_layout", "").upper() == "SINGLE":
                        job_context["sra_num_reads"] = 1
                        return job_context
        except Exception as e:
            logger.exception(
                "Problem trying to determine library strategy (single/paired)!",
                file=job_context["sra_input_file_path"],
            )
            job_context["job"].failure_reason = "Unable to determine library strategy (single/paired): " + str(e)
            job_context["success"] = False
            return job_context

    if not job_context.get("sra_num_reads", None):
        logger.error(
            "Completely unable to determine library strategy (single/paired)!",
            file=job_context["sra_input_file_path"],
        )
        job_context["job"].failure_reason = "Unable to determine library strategy (single/paired)"
        job_context["success"] = False

    return job_context
def _determine_index_length(job_context: Dict) -> Dict:
    """Determines whether to use the long or short salmon index.

    Adds the key 'index_length' to the job_context with a value of
    'short' if the short index is appropriate or 'long' if the long
    index is appropriate. For more information on index length see the
    _create_index function of the transcriptome_index processor.

    Also records the raw average read length under 'index_length_raw'
    for regression-testing purposes. On failure (no reads found) sets
    job_context["success"] = False and a failure_reason on the job.
    """
    # SRA files are streamed rather than read from disk, so they have a
    # dedicated code path for estimating read length.
    if job_context.get("sra_input_file_path", None):
        return _determine_index_length_sra(job_context)

    logger.debug("Determining index length..")

    # The FASTQ line counter is threaded through both files so the
    # every-fourth-line bookkeeping below stays aligned across them,
    # exactly as when both files were scanned in one continuous loop.
    total_base_pairs, number_of_reads, counter = _count_reads_in_fastq(
        job_context["input_file_path"], 0, 0, 1
    )

    if "input_file_path_2" in job_context:
        total_base_pairs, number_of_reads, counter = _count_reads_in_fastq(
            job_context["input_file_path_2"], total_base_pairs, number_of_reads, counter
        )

    if number_of_reads == 0:
        logger.error(
            "Unable to determine number_of_reads for job.",
            input_file_1=job_context.get("input_file_path"),
            input_file_2=job_context.get("input_file_path_2"),
            job_id=job_context["job"].id,
        )
        job_context["job"].failure_reason = "Unable to determine number_of_reads."
        job_context["job"].no_retry = True
        job_context["success"] = False
        return job_context

    index_length_raw = total_base_pairs / number_of_reads

    # Put the raw index length into the job context in a new field for regression testing purposes
    job_context["index_length_raw"] = index_length_raw

    if index_length_raw > 75:
        job_context["index_length"] = "long"
    else:
        job_context["index_length"] = "short"

    return job_context


def _count_reads_in_fastq(
    fastq_path: str, total_base_pairs: int, number_of_reads: int, counter: int
):
    """Accumulates read statistics from one (possibly gzipped) FASTQ file.

    Returns the updated (total_base_pairs, number_of_reads, counter)
    tuple; `counter` is 1-based and is intentionally carried over
    between paired-end files by the caller.
    """
    # zcat unzips the file provided and dumps the output to STDOUT.
    # It is installed by default in Debian so it should be included
    # in every docker image already.
    if ".gz" == fastq_path[-3:]:
        cat = "zcat"
    else:
        cat = "cat"

    with subprocess.Popen(
        [cat, fastq_path], stdout=subprocess.PIPE, universal_newlines=True
    ) as process:
        for line in process.stdout:
            # In the FASTQ file format, there are 4 lines for each
            # read. Three of these contain metadata about the
            # read. The string representing the read itself is found
            # on the second line of each quartet.
            if counter % 4 == 2:
                total_base_pairs += len(line.replace("\n", ""))
                number_of_reads += 1
            counter += 1

    return total_base_pairs, number_of_reads, counter
def _find_or_download_index(job_context: Dict) -> Dict:
    """Finds the appropriate Salmon Index for this experiment.

    Salmon documentation states:

    "If you want to use Salmon in quasi-mapping-based mode, then you
    first have to build an Salmon index for your transcriptome."

    We have used the Data Refinery to build these indices already,
    this function retrieves the location of the correct index for the
    organism and read length and adds it to the job context
    (keys: 'index_directory', 'genes_to_transcripts_path',
    'organism_index'). On failure sets job_context["success"] = False
    and a failure_reason on the job.
    """
    logger.debug("Fetching and installing index..")

    index_type = "TRANSCRIPTOME_" + job_context["index_length"].upper()
    index_object = (
        OrganismIndex.objects.filter(organism=job_context["organism"], index_type=index_type)
        .order_by("-created_at")
        .first()
    )

    if not index_object:
        logger.error(
            "Could not run Salmon processor without index for organism",
            organism=job_context["organism"],
            processor_job=job_context["job_id"],
            index_type=index_type,
        )
        job_context["job"].no_retry = True
        job_context["job"].failure_reason = "Missing transcriptome index. (" + index_type + ")"
        job_context["success"] = False
        return job_context

    job_context["index_directory"] = index_object.absolute_directory_path

    # Initialized before the `try` so the cleanup in the `except`
    # clause can safely test it even when an exception is raised before
    # the extraction directory is created. (Previously it was assigned
    # inside the `try`, so an early failure made the handler raise
    # UnboundLocalError and mask the real error.)
    index_hard_dir = None
    try:
        # The organism index only needs to be downloaded from S3 once per
        # organism per index length per EBS volume. We don't know if
        # another job has started downloading it yet, started extracting
        # it yet, or already finished and been symlinked to a common
        # location. Therefore check to see if it's happened before we
        # complete each step.
        version_info_path = job_context["index_directory"] + "/versionInfo.json"

        # Something very bad happened and now there are corrupt indexes installed. Nuke 'em.
        if os.path.exists(version_info_path) and (os.path.getsize(version_info_path) == 0):
            logger.error("We have to nuke a zero-valued index directory: " + version_info_path)
            shutil.rmtree(job_context["index_directory"], ignore_errors=True)
            os.makedirs(job_context["index_directory"], exist_ok=True)

        index_tarball = None
        if not os.path.exists(version_info_path):
            # Index is not installed yet, so download it.
            index_file = ComputedFile.objects.filter(result=index_object.result)[0]
            index_tarball = index_file.sync_from_s3(
                path=job_context["work_dir"] + index_file.filename
            )

        if not os.path.exists(version_info_path):
            # Index is still not installed yet, so extract it.
            # Create a temporary location to download the index to, which
            # can be symlinked to once extraction is complete.
            index_hard_dir = os.path.join(LOCAL_ROOT_DIR, job_context["job_dir_prefix"]) + "_index/"
            os.makedirs(index_hard_dir)
            with tarfile.open(index_tarball, "r:gz") as index_archive:
                index_archive.extractall(index_hard_dir)

        if not os.path.exists(version_info_path):
            # Index is still not installed yet, so symlink the files we
            # have to where they are expected to reside.
            os.makedirs(job_context["index_directory"], exist_ok=True)
            index_files = [
                "versionInfo.json",
                "duplicate_clusters.tsv",
                "hash.bin",
                "indexing.log",
                "refInfo.json",
                "sa.bin",
                "genes_to_transcripts.txt",
                "header.json",
                "quasi_index.log",
                "rsd.bin",
                "txpInfo.bin",
            ]
            for subfile in index_files:
                os.symlink(index_hard_dir + subfile, job_context["index_directory"] + "/" + subfile)
        elif index_hard_dir:
            # We have failed the race: another job installed the index
            # while we were extracting our copy.
            logger.error("We have failed the index extraction race! Removing dead trees.")
            shutil.rmtree(index_hard_dir, ignore_errors=True)
    except Exception as e:
        error_template = "Failed to download or extract transcriptome index for organism {0}: {1}"
        error_message = error_template.format(str(job_context["organism"]), str(e))
        logger.exception(error_message, processor_job=job_context["job_id"])
        job_context["job"].failure_reason = error_message
        job_context["success"] = False
        # Make sure we don't leave an empty index directory lying
        # around, but only if we actually created one.
        if index_hard_dir:
            shutil.rmtree(index_hard_dir, ignore_errors=True)
        return job_context

    # The index tarball contains a directory named index, so add that
    # to the path where we should put it.
    job_context["genes_to_transcripts_path"] = os.path.join(
        job_context["index_directory"], "genes_to_transcripts.txt"
    )

    job_context["organism_index"] = index_object
    return job_context
def _run_tximport_for_experiment(
    job_context: Dict, experiment: Experiment, quant_files: List[ComputedFile]
) -> Dict:
    """Runs tximport.R over all of `experiment`'s quant.sf files.

    Produces one RDS file plus one smashable per-sample TPM file, all
    attached to a new TXIMPORT ComputationalResult which is associated
    with every sample in the experiment. On failure sets
    job_context["success"] = False and a failure_reason; on success
    sets job_context["tximported"] = True.
    """
    # Download all the quant.sf files for this experiment. Write all
    # their paths to a file so we can pass a path to that to
    # tximport.R rather than having to pass in one argument per
    # sample.
    tximport_path_list_file = job_context["work_dir"] + "tximport_inputs.txt"
    quant_file_paths = {}
    with open(tximport_path_list_file, "w") as input_list:
        for quant_file in quant_files:
            # We create a directory in the work directory for each (quant.sf) file, as
            # tximport assigns column names based on the parent directory name,
            # and we need those names so that we can reassociate with the samples later.
            # ex., a file with absolute_file_path: /processor_job_1/SRR123_output/quant.sf
            # downloads to: /processor_job_2/SRR123_output/quant.sf
            # So the result file has frame "SRR123_output", which we can associate with sample SRR123
            sample_output = (
                job_context["work_dir"] + str(quant_file.absolute_file_path.split("/")[-2]) + "/"
            )
            os.makedirs(sample_output, exist_ok=True)
            quant_work_path = sample_output + quant_file.filename
            quant_file_path = quant_file.get_synced_file_path(path=quant_work_path)
            input_list.write(quant_file_path + "\n")
            # Sizes are kept only for the error log below, so a failed
            # run can be debugged without re-downloading the inputs.
            quant_file_paths[quant_file_path] = os.stat(quant_file_path).st_size

    rds_filename = "txi_out.RDS"
    rds_file_path = job_context["work_dir"] + rds_filename
    tpm_filename = "gene_lengthScaledTPM.tsv"
    tpm_file_path = job_context["work_dir"] + tpm_filename

    result = ComputationalResult()
    cmd_tokens = [
        "/usr/bin/Rscript",
        "--vanilla",
        "/home/user/data_refinery_workers/processors/tximport.R",
        "--file_list",
        tximport_path_list_file,
        "--gene2txmap",
        job_context["genes_to_transcripts_path"],
        "--rds_file",
        rds_file_path,
        "--tpm_file",
        tpm_file_path,
    ]
    result.time_start = timezone.now()

    logger.debug(
        "Running tximport with: %s",
        str(cmd_tokens),
        processor_job=job_context["job_id"],
        experiment=experiment.id,
    )

    try:
        tximport_result = subprocess.run(cmd_tokens, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        error_template = "Encountered error in R code while running tximport.R: {}"
        error_message = error_template.format(str(e))
        logger.error(error_message, processor_job=job_context["job_id"], experiment=experiment.id)
        job_context["job"].failure_reason = error_message
        job_context["success"] = False
        return job_context

    if tximport_result.returncode != 0:
        error_template = "Found non-zero exit code from R code while running tximport.R: {}"
        error_message = error_template.format(tximport_result.stderr.decode().strip())
        logger.error(
            error_message,
            processor_job=job_context["job_id"],
            experiment=experiment.id,
            quant_files=quant_files,
            cmd_tokens=cmd_tokens,
            quant_file_paths=quant_file_paths,
        )
        job_context["job"].failure_reason = error_message
        job_context["success"] = False
        return job_context

    result.time_end = timezone.now()
    result.commands.append(" ".join(cmd_tokens))
    result.is_ccdl = True
    try:
        processor_key = "TXIMPORT"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)

    # The result must be saved before any file or sample associations
    # below can reference it.
    result.save()
    job_context["pipeline"].steps.append(result.id)

    # Associate this result with all samples in this experiment.
    # TODO: This may not be completely sensible, because `tximport` is
    # done at experiment level, not at sample level.
    # Could be very problematic if SRA's data model allows many
    # Experiments to one Run.
    # https://github.com/AlexsLemonade/refinebio/issues/297
    for sample in experiment.samples.all():
        s_r = SampleResultAssociation(sample=sample, result=result)
        s_r.save()

    rds_file = ComputedFile()
    rds_file.absolute_file_path = rds_file_path
    rds_file.filename = rds_filename
    rds_file.result = result
    rds_file.is_smashable = False
    rds_file.is_qc = False
    rds_file.is_public = True
    rds_file.calculate_sha1()
    rds_file.calculate_size()
    rds_file.save()
    job_context["computed_files"].append(rds_file)

    # Split the tximport result into smashable subfiles, one column
    # (sample) per file.
    data = pd.read_csv(tpm_file_path, sep="\t", header=0, index_col=0)
    individual_files = []
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        # Create sample-specific TPM file.
        sample_file_name = frame.columns.values[0] + "_" + tpm_filename
        frame_path = os.path.join(job_context["work_dir"], sample_file_name)
        frame.to_csv(frame_path, sep="\t", encoding="utf-8")

        # The frame column header is based off of the path, which includes _output.
        sample = Sample.objects.get(accession_code=frame.columns.values[0].replace("_output", ""))

        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = sample_file_name
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context["computed_files"].append(computed_file)
        job_context["smashable_files"].append(computed_file)

        SampleResultAssociation.objects.get_or_create(sample=sample, result=result)

        # Create association with the RDS file.
        SampleComputedFileAssociation.objects.get_or_create(sample=sample, computed_file=rds_file)

        # Create association with TPM file.
        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample, computed_file=computed_file
        )

        individual_files.append(computed_file)
        job_context["samples"].append(sample)

    # Salmon-processed samples aren't marked as is_processed
    # until they are fully tximported, this value sets that
    # for the end_job function.
    job_context["tximported"] = True
    job_context["individual_files"] = individual_files
    return job_context
def get_tximport_inputs(job_context: Dict) -> Dict:
    """Adds to the job_context a mapping from experiments to a list of their quant files.

    Checks all the experiments which contain a sample from the current
    experiment. If any of them are fully processed (at least with
    salmon-quant) then the return dict will include the experiment
    mapping to a list of paths to the quant.sf file for each sample in
    that experiment.

    The mapping is stored under job_context["tximport_inputs"]. For
    tximport-only jobs this also back-fills job_context["index_length"]
    from a prior salmon-quant result's annotation, failing the job if
    no such annotation exists.
    """
    experiments = Experiment.objects.filter(
        samples=job_context["sample"]
    )  # https://stackoverflow.com/a/18317340/763705

    quantified_experiments = {}
    for experiment in experiments:
        # We only want to consider samples that we actually can run salmon on.
        eligible_samples = experiment.samples.filter(source_database="SRA", technology="RNA-SEQ")

        num_eligible_samples = eligible_samples.count()
        if num_eligible_samples == 0:
            continue

        salmon_quant_results = get_quant_results_for_experiment(experiment)

        is_tximport_job = "is_tximport_only" in job_context and job_context["is_tximport_only"]
        if is_tximport_job and salmon_quant_results.count() > 0:
            # If the job is only running tximport, then index_length
            # hasn't been set on the job context because we don't have
            # a raw file to run it on. Therefore pull it from one of
            # the result annotations.
            annotations = ComputationalResultAnnotation.objects.filter(
                result=salmon_quant_results[0]
            )
            for annotation_json in annotations:
                if "index_length" in annotation_json.data:
                    job_context["index_length"] = annotation_json.data["index_length"]
                    break

            if "index_length" not in job_context:
                failure_reason = (
                    "Found quant result without an annotation specifying its index length. "
                    "Why did this happen?!?"
                )
                logger.error(failure_reason, processor_job=job_context["job_id"])
                job_context["job"].failure_reason = failure_reason
                job_context["job"].no_retry = True
                job_context["success"] = False
                return job_context

        if should_run_tximport(experiment, salmon_quant_results, is_tximport_job):
            quantified_experiments[experiment] = get_quant_files_for_results(salmon_quant_results)

    job_context["tximport_inputs"] = quantified_experiments

    return job_context
def tximport(job_context: Dict) -> Dict:
    """Run tximport R script based on input quant files and the path
    of genes_to_transcripts.txt.

    Iterates over job_context["tximport_inputs"] (built by
    get_tximport_inputs), running tximport once per quantified
    experiment. Stops at the first failure. A tximport-only job that
    quantified nothing is marked as failed rather than silently
    succeeding as a no-op.
    """
    tximport_inputs = job_context["tximport_inputs"]

    quantified_experiments = 0
    for experiment, quant_files in tximport_inputs.items():
        job_context = _run_tximport_for_experiment(job_context, experiment, quant_files)
        # If `tximport` on any related experiment fails, exit immediately.
        if not job_context["success"]:
            return job_context
        quantified_experiments += 1

    # NOTE: this guard previously tested the nonexistent key
    # "is_tximport_job", so it could never fire. The key actually set
    # by the pipeline (see get_tximport_inputs) is "is_tximport_only".
    if quantified_experiments == 0 and job_context.get("is_tximport_only"):
        failure_reason = "Tximport job ran on no experiments... Why?!?!?"
        logger.error(failure_reason, processor_job=job_context["job_id"])
        job_context["job"].failure_reason = failure_reason
        job_context["job"].no_retry = True
        job_context["success"] = False

    return job_context
def _run_salmon(job_context: Dict) -> Dict:
    """Runs Salmon Quant on the prepared input file(s).

    Builds the salmon command differently for SRA-streamed inputs
    (single or paired, via named pipes) versus already-extracted FASTQ
    files (single or paired). On success, saves a ComputationalResult
    plus two ComputedFiles (the zipped quant output directory and the
    bare quant.sf), uploads quant.sf to S3 when running in the cloud,
    and records Salmon's JSON diagnostics as
    ComputationalResultAnnotations. On failure sets
    job_context["success"] = False and a failure_reason on the job.
    """
    logger.debug("Running Salmon..")

    # Salmon needs to be run differently for different sample types.
    # SRA files also get processed differently as we don't want to use fasterq-dump to extract
    # them to disk.
    if job_context.get("sra_input_file_path", None):
        # Single reads
        if job_context["sra_num_reads"] == 1:
            # Stream fastq-dump's output through a named pipe so the
            # extracted reads never touch the disk; the dump runs in
            # the background (trailing `&`) while salmon consumes the
            # fifo.
            fifo = "/tmp/barney"
            os.mkfifo(fifo)

            dump_str = "fastq-dump --stdout {input_sra_file} > {fifo} &"
            formatted_dump_command = dump_str.format(
                input_sra_file=job_context["sra_input_file_path"], fifo=fifo
            )
            # dump_po is intentionally not waited on; salmon blocks on
            # the fifo until the dump finishes.
            dump_po = subprocess.Popen(
                formatted_dump_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            )

            command_str = (
                "salmon --no-version-check quant -l A -i {index} "
                "-r {fifo} -p 16 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_sra_file=job_context["sra_input_file_path"],
                fifo=fifo,
                output_directory=job_context["output_directory"],
            )
        # Paired are trickier
        else:
            # Okay, for some reason I can't explain, this only works in the temp directory,
            # otherwise the `tee` part will only output to one or the other of the streams (non-deterministically),
            # but not both. This doesn't appear to happen if the fifos are in tmp.
            alpha = "/tmp/alpha"
            os.mkfifo(alpha)
            beta = "/tmp/beta"
            os.mkfifo(beta)

            # Split fastq-dump's interleaved output into the two mate
            # streams by grepping on the read-name suffix (.1 / .2).
            dump_str = "fastq-dump --stdout --split-files -I {input_sra_file} | tee >(grep '@.*\.1\s' -A3 --no-group-separator > {fifo_alpha}) >(grep '@.*\.2\s' -A3 --no-group-separator > {fifo_beta}) > /dev/null &"
            formatted_dump_command = dump_str.format(
                input_sra_file=job_context["sra_input_file_path"], fifo_alpha=alpha, fifo_beta=beta
            )
            # Process substitution (`>(...)`) requires bash, not sh.
            dump_po = subprocess.Popen(
                formatted_dump_command,
                shell=True,
                executable="/bin/bash",
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )

            command_str = (
                "salmon --no-version-check quant -l A -i {index} "
                "-1 {fifo_alpha} -2 {fifo_beta} -p 16 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_sra_file=job_context["sra_input_file_path"],
                fifo_alpha=alpha,
                fifo_beta=beta,
                output_directory=job_context["output_directory"],
            )
    else:
        if "input_file_path_2" in job_context:
            second_read_str = " -2 {}".format(job_context["input_file_path_2"])

            # Rob recommends 16 threads/process, which fits snugly on an x1 at 8GB RAM per Salmon container:
            # (2 threads/core * 16 cores/socket * 64 vCPU) / (1TB/8GB) = ~17
            command_str = (
                "salmon --no-version-check quant -l A --biasSpeedSamp 5 -i {index}"
                " -1 {input_one}{second_read_str} -p 16 -o {output_directory}"
                " --seqBias --gcBias --dumpEq --writeUnmappedNames"
            )

            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_one=job_context["input_file_path"],
                second_read_str=second_read_str,
                output_directory=job_context["output_directory"],
            )
        else:
            # Related: https://github.com/COMBINE-lab/salmon/issues/83
            command_str = (
                "salmon --no-version-check quant -l A -i {index}"
                " -r {input_one} -p 16 -o {output_directory}"
                " --seqBias --dumpEq --writeUnmappedNames"
            )

            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_one=job_context["input_file_path"],
                output_directory=job_context["output_directory"],
            )

    logger.debug(
        "Running Salmon Quant using the following shell command: %s",
        formatted_command,
        processor_job=job_context["job_id"],
    )

    # Salmon probably shouldn't take longer than three hours.
    timeout = 60 * 60 * 3
    job_context["time_start"] = timezone.now()
    try:
        completed_command = subprocess.run(
            formatted_command.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        failure_reason = "Salmon timed out because it failed to complete within 3 hours."
        logger.error(
            failure_reason,
            sample_accesion_code=job_context["sample"].accession_code,
            processor_job=job_context["job_id"],
        )
        job_context["job"].failure_reason = failure_reason
        job_context["job"].no_retry = True
        job_context["success"] = False
        return job_context

    job_context["time_end"] = timezone.now()

    if completed_command.returncode == 1:
        stderr = completed_command.stderr.decode().strip()
        # Surface only the text from Salmon's "ERROR:" marker onward,
        # when such a marker is present.
        error_start = stderr.upper().find("ERROR:")
        error_start = error_start if error_start != -1 else 0
        logger.error(
            "Shell call to salmon failed with error message: %s",
            stderr[error_start:],
            processor_job=job_context["job_id"],
        )

        # If salmon has an error exit code then we don't want to retry it.
        job_context["job"].no_retry = True
        job_context["job"].failure_reason = (
            "Shell call to salmon failed because: " + stderr[error_start:]
        )
        job_context["success"] = False
    else:
        result = ComputationalResult()
        result.commands.append(formatted_command)
        result.time_start = job_context["time_start"]
        result.time_end = job_context["time_end"]
        result.organism_index = job_context["organism_index"]
        result.is_ccdl = True

        try:
            processor_key = "SALMON_QUANT"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)

        # Zip up the output of Salmon Quant
        try:
            with tarfile.open(job_context["output_archive"], "w:gz") as tar:
                tar.add(job_context["output_directory"], arcname=os.sep)
        except Exception:
            logger.exception(
                "Exception caught while zipping processed directory %s",
                job_context["output_directory"],
                processor_job=job_context["job_id"],
            )
            failure_template = "Exception caught while zipping processed directory {}"
            job_context["job"].failure_reason = failure_template.format(
                job_context["output_archive"]
            )
            job_context["success"] = False
            return job_context

        salmon_quant_archive = ComputedFile()
        salmon_quant_archive.absolute_file_path = job_context["output_archive"]
        salmon_quant_archive.filename = os.path.split(job_context["output_archive"])[-1]
        salmon_quant_archive.calculate_sha1()
        salmon_quant_archive.calculate_size()
        salmon_quant_archive.is_public = True
        salmon_quant_archive.is_smashable = False
        salmon_quant_archive.is_qc = False

        quant_file = ComputedFile()
        quant_file.s3_bucket = S3_BUCKET_NAME
        # Timestamp the S3 key so re-runs of the same sample don't
        # collide with an earlier upload.
        timestamp = str(timezone.now().timestamp()).split(".")[0]
        quant_file.s3_key = "quant_files/sample_{0}_{1}_quant.sf".format(
            job_context["sample"].id, timestamp
        )
        quant_file.filename = "quant.sf"
        quant_file.absolute_file_path = job_context["output_directory"] + "quant.sf"
        quant_file.is_public = False
        quant_file.is_smashable = False
        quant_file.is_qc = False
        quant_file.calculate_sha1()
        quant_file.calculate_size()

        # If we're running in the cloud we need to upload the quant.sf
        # file so that it can be used by a job running on any machine
        # to run tximport. We can't use sync_to_s3 though because we
        # have to sync it before we can save the file so it cannot be
        # discovered by other jobs before it is uploaded.
        if settings.RUNNING_IN_CLOUD:
            try:
                S3.upload_file(
                    quant_file.absolute_file_path,
                    quant_file.s3_bucket,
                    quant_file.s3_key,
                    ExtraArgs={"ACL": "public-read", "StorageClass": "STANDARD_IA"},
                )
            except Exception as e:
                logger.exception(
                    e, processor_job=job_context["job_id"], sample=job_context["sample"].id
                )
                failure_template = "Exception caught while uploading quantfile to S3: {}"
                job_context["job"].failure_reason = failure_template.format(
                    quant_file.absolute_file_path
                )
                job_context["success"] = False
                return job_context

        # Here select_for_update() is used as a mutex that forces multiple
        # jobs to execute this block of code in serial manner. See:
        # https://docs.djangoproject.com/en/1.11/ref/models/querysets/#select-for-update
        # Theoretically any rows in any table can be locked here, we're
        # locking all existing rows in ComputationalResult table.
        with transaction.atomic():
            ComputationalResult.objects.select_for_update()
            result.save()
            job_context["quant_result"] = result
            quant_file.result = result
            quant_file.save()

        job_context["result"] = result
        job_context["pipeline"].steps.append(result.id)

        SampleResultAssociation.objects.get_or_create(
            sample=job_context["sample"], result=result
        )

        salmon_quant_archive.result = result
        salmon_quant_archive.save()
        job_context["computed_files"].append(salmon_quant_archive)

        # Record which index length was used so tximport-only jobs can
        # recover it later (see get_tximport_inputs).
        kv = ComputationalResultAnnotation()
        kv.data = {
            "index_length": job_context["index_length"],
            "index_length_get": job_context.get("index_length_raw", None),
        }
        kv.result = result
        kv.is_public = True
        kv.save()

        try:
            with open(
                os.path.join(job_context["output_directory"], "lib_format_counts.json")
            ) as lfc_file:
                format_count_data = json.load(lfc_file)
                kv = ComputationalResultAnnotation()
                kv.data = format_count_data
                kv.result = result
                kv.is_public = True
                kv.save()
        except Exception:
            # See: https://github.com/AlexsLemonade/refinebio/issues/1167
            logger.exception(
                "Error parsing Salmon lib_format_counts JSON output!",
                processor_job=job_context["job_id"],
            )

        try:
            with open(
                os.path.join(job_context["output_directory"], "aux_info", "meta_info.json")
            ) as mi_file:
                meta_info = json.load(mi_file)
                kv = ComputationalResultAnnotation()
                kv.data = meta_info
                kv.result = result
                kv.is_public = True
                kv.save()
        except Exception:
            # See: https://github.com/AlexsLemonade/refinebio/issues/1167
            logger.exception(
                "Error parsing Salmon meta_info JSON output!", processor_job=job_context["job_id"]
            )

        job_context["success"] = True

    return job_context
def _run_salmontools(job_context: Dict) -> Dict:
    """Runs SalmonTools to extract the reads salmon could not map.

    Consumes the unmapped_names.txt that salmon quant produced (via
    --writeUnmappedNames), zips the extracted reads into
    job_context["salmontools_archive"], and records the run as a
    SALMONTOOLS ComputationalResult with a QC ComputedFile. On failure
    sets job_context["success"] = False and a failure_reason.
    """
    logger.debug("Running SalmonTools ...")
    unmapped_filename = job_context["output_directory"] + "aux_info/unmapped_names.txt"

    command_str = "salmontools extract-unmapped -u {unmapped_file} -o {output} "
    output_prefix = job_context["salmontools_directory"] + "unmapped_by_salmon"
    command_str = command_str.format(unmapped_file=unmapped_filename, output=output_prefix)
    # Paired-end input gets both read files; single-end only one.
    if "input_file_path_2" in job_context:
        command_str += "-1 {input_1} -2 {input_2}"
        command_str = command_str.format(
            input_1=job_context["input_file_path"], input_2=job_context["input_file_path_2"]
        )
    else:
        command_str += "-r {input_1}"
        command_str = command_str.format(input_1=job_context["input_file_path"])

    start_time = timezone.now()
    logger.debug(
        "Running the following SalmonTools command: %s",
        command_str,
        processor_job=job_context["job_id"],
    )

    completed_command = subprocess.run(
        command_str.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    end_time = timezone.now()

    # As of SalmonTools 0.1.0, completed_command.returncode is always 0,
    # (even if error happens). completed_command.stderr is not totally
    # reliable either, because it will output the following line even
    # when the execution succeeds:
    #  "There were <N> unmapped reads\n"
    # in which "<N>" is the number of lines in input unmapped_names.txt.
    #
    # As a workaround, we are using a regular expression here to test
    # the status of SalmonTools execution.  Any text in stderr that is
    # not in the above format is treated as error message.
    status_str = completed_command.stderr.decode().strip()
    success_pattern = r"^There were \d+ unmapped reads$"
    if re.match(success_pattern, status_str):
        # Zip up the output of salmontools
        try:
            with tarfile.open(job_context["salmontools_archive"], "w:gz") as tar:
                tar.add(job_context["salmontools_directory"], arcname=os.sep)
        except Exception:
            logger.exception(
                "Exception caught while zipping processed directory %s",
                job_context["salmontools_directory"],
                processor_job=job_context["job_id"],
            )
            failure_template = "Exception caught while zipping salmontools directory {}"
            job_context["job"].failure_reason = failure_template.format(
                job_context["salmontools_archive"]
            )
            job_context["success"] = False
            return job_context

        result = ComputationalResult()
        result.commands.append(command_str)
        result.time_start = start_time
        result.time_end = end_time
        result.is_ccdl = True

        try:
            processor_key = "SALMONTOOLS"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)

        # The result must be saved before the associations below can
        # reference it.
        result.save()
        job_context["pipeline"].steps.append(result.id)

        assoc = SampleResultAssociation()
        assoc.sample = job_context["sample"]
        assoc.result = result
        assoc.save()

        computed_file = ComputedFile()
        computed_file.filename = job_context["salmontools_archive"].split("/")[-1]
        computed_file.absolute_file_path = job_context["salmontools_archive"]
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.is_public = True
        computed_file.is_smashable = False
        computed_file.is_qc = True
        computed_file.result = result
        computed_file.save()
        job_context["computed_files"].append(computed_file)

        assoc = SampleComputedFileAssociation()
        assoc.sample = job_context["sample"]
        assoc.computed_file = computed_file
        assoc.save()

        job_context["result"] = result
        job_context["success"] = True
    else:  # error in salmontools
        logger.error(
            "Shell call to salmontools failed with error message: %s",
            status_str,
            processor_job=job_context["job_id"],
        )
        job_context["job"].failure_reason = (
            "Shell call to salmontools failed because: " + status_str
        )
        job_context["success"] = False

    return job_context
def salmon(job_id: int) -> None:
    """Main processor function for the Salmon Processor.

    Runs salmon quant command line tool, specifying either a long or
    short read length. Also runs Salmontools and Tximport.
    """
    # The ordered stages of the Salmon pipeline; each receives the
    # shared job_context dict and returns it (possibly mutated).
    pipeline_steps = [
        utils.start_job,
        _set_job_prefix,
        _prepare_files,
        _determine_index_length,
        _find_or_download_index,
        _run_salmon,
        get_tximport_inputs,
        tximport,
        _run_salmontools,
        utils.end_job,
    ]
    pipeline = Pipeline(name=PipelineEnum.SALMON.value)
    initial_context = {"job_id": job_id, "pipeline": pipeline}
    return utils.run_pipeline(initial_context, pipeline_steps)
<|code_end|>
| common/data_refinery_common/models/models.py
<|code_start|>import hashlib
import io
import os
import shutil
import uuid
from datetime import datetime
from functools import partial
from typing import Dict, Set
from django.conf import settings
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models, transaction
from django.db.models import Count, DateTimeField, Prefetch
from django.db.models.expressions import F, Q
from django.utils import timezone
import boto3
import pytz
from botocore.client import Config
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.organism import Organism
from data_refinery_common.utils import (
FileUtils,
calculate_file_size,
calculate_sha1,
get_env_variable,
get_s3_url,
)
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client("s3", config=Config(signature_version="s3v4"))
logger = get_and_configure_logger(__name__)
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = "salmon " + get_env_variable("SALMON_VERSION", "0.13.1")
CHUNK_SIZE = 1024 * 256 # chunk_size is in bytes
"""
# First Order Classes
This represent the primary data types we will be querying
and filtering against.
"""
class PublicObjectsManager(models.Manager):
    """Model manager whose default queryset is restricted to rows
    flagged is_public=True.
    """

    def get_queryset(self):
        # Start from the default queryset and filter down to public rows.
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_public=True)
class ProcessedObjectsManager(models.Manager):
    """Model manager whose default queryset is restricted to rows that
    are both processed and public (is_processed=True, is_public=True).
    """

    def get_queryset(self):
        # Start from the default queryset and filter down to processed,
        # public rows.
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_processed=True, is_public=True)
class Sample(models.Model):
    """An individual sample: a single assayed biological specimen.

    A Sample belongs to one or more Experiments, is backed by one or more
    OriginalFiles, and accumulates ComputationalResults / ComputedFiles as
    it moves through the processing pipeline.
    """

    class Meta:
        db_table = "samples"
        base_manager_name = "public_objects"
        get_latest_by = "created_at"
        indexes = [
            models.Index(fields=["accession_code"]),
        ]

    def __str__(self):
        return self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_objects = ProcessedObjectsManager()

    # Identifiers
    accession_code = models.CharField(max_length=255, unique=True)
    title = models.CharField(max_length=255, unique=False, blank=True)

    # Relations
    organism = models.ForeignKey(Organism, blank=True, null=True, on_delete=models.SET_NULL)
    results = models.ManyToManyField("ComputationalResult", through="SampleResultAssociation")
    original_files = models.ManyToManyField("OriginalFile", through="OriginalFileSampleAssociation")
    computed_files = models.ManyToManyField("ComputedFile", through="SampleComputedFileAssociation")
    experiments = models.ManyToManyField("Experiment", through="ExperimentSampleAssociation")

    # Historical Properties
    source_database = models.CharField(max_length=255, blank=False)
    source_archive_url = models.CharField(max_length=255)
    source_filename = models.CharField(max_length=255, blank=False)
    source_absolute_file_path = models.CharField(max_length=255)
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Technological Properties
    platform_accession_code = models.CharField(max_length=256, blank=True)
    platform_name = models.CharField(max_length=256, blank=True)
    technology = models.CharField(max_length=256, blank=True)  # MICROARRAY, RNA-SEQ
    manufacturer = models.CharField(max_length=256, blank=True)
    protocol_info = JSONField(default=dict)

    # Scientific Properties
    sex = models.CharField(max_length=255, blank=True)
    age = models.DecimalField(max_length=255, blank=True, max_digits=8, decimal_places=3, null=True)
    specimen_part = models.CharField(max_length=255, blank=True)
    genotype = models.CharField(max_length=255, blank=True)
    disease = models.CharField(max_length=255, blank=True)
    disease_stage = models.CharField(max_length=255, blank=True)
    cell_line = models.CharField(max_length=255, blank=True)
    treatment = models.CharField(max_length=255, blank=True)
    race = models.CharField(max_length=255, blank=True)
    subject = models.CharField(max_length=255, blank=True)
    compound = models.CharField(max_length=255, blank=True)
    time = models.CharField(max_length=255, blank=True)

    # Crunch Properties
    is_processed = models.BooleanField(default=False)

    # Blacklisting
    is_blacklisted = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(Sample, self).save(*args, **kwargs)

    def to_metadata_dict(self) -> Dict:
        """Render this Sample as a dict of "refinebio_"-prefixed metadata fields."""
        metadata = {}
        metadata["refinebio_title"] = self.title
        metadata["refinebio_accession_code"] = self.accession_code
        metadata["refinebio_organism"] = self.organism.name if self.organism else None
        metadata["refinebio_source_database"] = self.source_database
        metadata["refinebio_source_archive_url"] = self.source_archive_url
        metadata["refinebio_sex"] = self.sex
        # age may be NULL in the DB; render as empty string instead of None.
        metadata["refinebio_age"] = self.age or ""
        metadata["refinebio_specimen_part"] = self.specimen_part
        metadata["refinebio_genetic_information"] = self.genotype
        metadata["refinebio_disease"] = self.disease
        metadata["refinebio_disease_stage"] = self.disease_stage
        metadata["refinebio_cell_line"] = self.cell_line
        metadata["refinebio_treatment"] = self.treatment
        metadata["refinebio_race"] = self.race
        metadata["refinebio_subject"] = self.subject
        metadata["refinebio_compound"] = self.compound
        metadata["refinebio_time"] = self.time
        metadata["refinebio_platform"] = self.pretty_platform
        metadata["refinebio_annotations"] = [
            data for data in self.sampleannotation_set.all().values_list("data", flat=True)
        ]
        return metadata

    # Returns a set of ProcessorJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_processor_jobs(self) -> Set:
        """Collect the distinct ProcessorJobs attached to this sample's original files."""
        processor_jobs = set()
        for original_file in self.original_files.prefetch_related("processor_jobs").all():
            for processor_job in original_file.processor_jobs.all():
                processor_jobs.add(processor_job)
        return processor_jobs

    # Returns a set of DownloaderJob objects but we cannot specify
    # that in type hints because it hasn't been declared yet.
    def get_downloader_jobs(self) -> Set:
        """Collect the distinct DownloaderJobs attached to this sample's original files."""
        downloader_jobs = set()
        for original_file in self.original_files.prefetch_related("downloader_jobs").all():
            for downloader_job in original_file.downloader_jobs.all():
                downloader_jobs.add(downloader_job)
        return downloader_jobs

    def get_result_files(self):
        """ Get all of the ComputedFile objects associated with this Sample """
        return self.computed_files.all()

    def get_most_recent_smashable_result_file(self):
        """Get the most recent smashable ComputedFile for this Sample, or None.

        "Most recent" is determined by Meta.get_latest_by ("created_at").
        """
        try:
            latest_computed_file = self.computed_files.filter(
                is_public=True, is_smashable=True,
            ).latest()
            return latest_computed_file
        except ComputedFile.DoesNotExist as e:
            # This sample has no smashable files yet.
            return None

    def get_most_recent_quant_sf_file(self):
        """ Returns the latest quant.sf file that was generated for this sample.
        Note: We don't associate that file to the computed_files of this sample, that's
        why we have to go through the computational results. """
        return (
            ComputedFile.objects.filter(
                result__in=self.results.all(),
                filename="quant.sf",
                # Only consider files that have actually been uploaded to S3.
                s3_key__isnull=False,
                s3_bucket__isnull=False,
            )
            .order_by("-created_at")
            .first()
        )

    @property
    def pretty_platform(self):
        """ Turns

        [HT_HG-U133_Plus_PM] Affymetrix HT HG-U133+ PM Array Plate

        into

        Affymetrix HT HG-U133+ PM Array Plate (hthgu133pluspm)
        """
        # Strip a leading "[ACCESSION]" prefix if the platform name has one.
        if "]" in self.platform_name:
            platform_base = self.platform_name.split("]")[1].strip()
        else:
            platform_base = self.platform_name
        return platform_base + " (" + self.platform_accession_code + ")"
class SampleAnnotation(models.Model):
    """Semi-standard information associated with a Sample.

    The payload lives in the free-form ``data`` JSON field; ``is_ccdl``
    distinguishes annotations created by our own pipeline from those
    imported from the source repository.
    """

    class Meta:
        db_table = "sample_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(SampleAnnotation, self).save(*args, **kwargs)
class ProcessedPublicObjectsManager(models.Manager):
    """Manager returning only public Experiments that have at least one
    processed Sample (via the cached ``num_processed_samples`` counter)."""

    def get_queryset(self):
        # Relies on Experiment.num_processed_samples being kept up to date
        # by Experiment.update_num_samples().
        base_queryset = super().get_queryset()
        return base_queryset.filter(is_public=True, num_processed_samples__gt=0)
class Experiment(models.Model):
    """An Experiment or Study: a set of Samples submitted together.

    Caches several aggregate counts (``num_total_samples`` etc.) that are
    refreshed via ``update_num_samples()``.
    """

    class Meta:
        db_table = "experiments"
        base_manager_name = "public_objects"

    def __str__(self):
        return "Experiment: " + self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()

    # Relations
    samples = models.ManyToManyField("Sample", through="ExperimentSampleAssociation")
    organisms = models.ManyToManyField("Organism", through="ExperimentOrganismAssociation")

    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)

    # Historical Properties
    source_database = models.CharField(max_length=32)  # "ArrayExpress, "SRA", "GEO"
    source_url = models.TextField()

    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)

    # Cached Computed Properties (refreshed by update_num_samples /
    # update_sample_metadata_fields / update_platform_names).
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps and derive the alternate accession code.

        GEO ("GSExxx") and ArrayExpress-imported-GEO ("E-GEOD-xxx") accessions
        refer to the same study, so each one implies the other.
        """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time

        if self.accession_code and not self.alternate_accession_code:
            if self.accession_code.startswith("GSE"):
                self.alternate_accession_code = "E-GEOD-" + self.accession_code[3:]
            elif self.accession_code.startswith("E-GEOD-"):
                self.alternate_accession_code = "GSE" + self.accession_code[7:]

        return super(Experiment, self).save(*args, **kwargs)

    def update_num_samples(self):
        """Recompute and persist the cached sample counters from the DB."""
        aggregates = self.samples.aggregate(
            num_total_samples=Count("id"),
            num_processed_samples=Count("id", filter=Q(is_processed=True)),
            # "Downloadable" additionally requires a QN target for the organism.
            num_downloadable_samples=Count(
                "id", filter=Q(is_processed=True, organism__qn_target__isnull=False)
            ),
        )
        self.num_total_samples = aggregates["num_total_samples"]
        self.num_processed_samples = aggregates["num_processed_samples"]
        self.num_downloadable_samples = aggregates["num_downloadable_samples"]
        self.save()

    def to_metadata_dict(self) -> Dict:
        """Render this Experiment as a plain dict of metadata fields."""
        metadata = {}
        metadata["title"] = self.title
        metadata["accession_code"] = self.accession_code
        metadata["organisms"] = list(self.organisms.all().values_list("name", flat=True))
        metadata["sample_accession_codes"] = list(
            self.samples.all().values_list("accession_code", flat=True)
        )
        metadata["description"] = self.description
        metadata["protocol_description"] = self.protocol_description
        metadata["technology"] = self.technology
        metadata["submitter_institution"] = self.submitter_institution
        metadata["has_publication"] = self.has_publication
        metadata["publication_title"] = self.publication_title
        metadata["publication_doi"] = self.publication_doi
        metadata["pubmed_id"] = self.pubmed_id
        # Dates may be NULL; render as empty strings rather than None.
        if self.source_first_published:
            metadata["source_first_published"] = self.source_first_published.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_first_published"] = ""
        if self.source_last_modified:
            metadata["source_last_modified"] = self.source_last_modified.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_last_modified"] = ""

        return metadata

    def get_sample_metadata_fields(self):
        """ Get all metadata fields that are non-empty for at least one sample in the experiment.
        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        fields = []

        possible_fields = [
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
        ]
        samples = self.samples.all()
        for field in possible_fields:
            for sample in samples:
                # Hoist the attribute lookup and compare with `is not None`
                # (PEP 8) instead of `!= None`.
                value = getattr(sample, field)
                if value is not None and value != "":
                    fields.append(field)
                    break

        return fields

    def update_sample_metadata_fields(self):
        """Refresh the cached list of populated sample metadata fields."""
        self.sample_metadata_fields = self.get_sample_metadata_fields()

    def update_platform_names(self):
        """Refresh the cached platform name/accession-code lists."""
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()

    def get_sample_technologies(self):
        """ Get a list of unique technologies for all of the associated samples
        """
        return list(set([sample.technology for sample in self.samples.all()]))

    def get_platform_names(self):
        """ Get a list of unique platforms for all of the associated samples
        """
        return list(set([sample.platform_name for sample in self.samples.all()]))

    def get_platform_accession_codes(self):
        """ Get a list of unique platforms for all of the associated samples
        """
        return list(set([sample.platform_accession_code for sample in self.samples.all()]))

    @property
    def platforms(self):
        """ Returns a list of related pipelines """
        # NOTE(review): duplicates get_platform_names(); kept for API compatibility.
        return list(set([sample.platform_name for sample in self.samples.all()]))

    @property
    def pretty_platforms(self):
        """ Returns a prettified list of related pipelines """
        return list(set([sample.pretty_platform for sample in self.samples.all()]))

    @property
    def processed_samples(self):
        """Accession codes of this experiment's processed samples."""
        # `if sample.is_processed` rather than `== True` (PEP 8).
        return [sample.accession_code for sample in self.samples.all() if sample.is_processed]

    @property
    def organism_names(self):
        """ Get a list of unique organism names that has at least one downloadable sample """
        result = (
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False)
            .values_list("organism__name", flat=True)
            .distinct()
        )
        return list(result)

    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.
        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False).values_list(
                "accession_code", flat=True
            )
        )
class ExperimentAnnotation(models.Model):
    """Semi-standard information associated with an Experiment.

    The payload lives in the free-form ``data`` JSON field; ``is_ccdl``
    distinguishes annotations created by our own pipeline from those
    imported from the source repository.
    """

    class Meta:
        db_table = "experiment_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ExperimentAnnotation, self).save(*args, **kwargs)
class Pipeline(models.Model):
    """Pipeline that is associated with a series of ComputationalResult records.

    ``steps`` holds the ordered ComputationalResult ids produced by this
    pipeline run — TODO confirm against the processor code that populates it.
    """

    name = models.CharField(max_length=255)
    steps = ArrayField(models.IntegerField(), default=list)

    class Meta:
        db_table = "pipelines"
class Processor(models.Model):
    """Processor associated with a certain ComputationalResult.

    A processor is uniquely identified by the combination of its name,
    version, docker image, and environment snapshot (see unique_together).
    """

    name = models.CharField(max_length=255)
    version = models.CharField(max_length=64)
    docker_image = models.CharField(max_length=255)
    environment = JSONField(default=dict)

    class Meta:
        db_table = "processors"
        unique_together = ("name", "version", "docker_image", "environment")

    def __str__(self):
        return "Processor: %s (version: %s, docker_image: %s)" % (
            self.name,
            self.version,
            self.docker_image,
        )
class ComputationalResult(models.Model):
    """ Meta-information about the output of a computer process. (Ex Salmon) """

    class Meta:
        db_table = "computational_results"
        base_manager_name = "public_objects"

    def __str__(self):
        processor_name_str = ""
        if self.processor:
            processor_name_str = ": " + str(self.processor.name)

        return "ComputationalResult " + str(self.pk) + processor_name_str

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    commands = ArrayField(models.TextField(), default=list)
    processor = models.ForeignKey(Processor, blank=True, null=True, on_delete=models.CASCADE)
    samples = models.ManyToManyField("Sample", through="SampleResultAssociation")

    # The Organism Index used to process the sample.
    organism_index = models.ForeignKey(
        "OrganismIndex", blank=True, null=True, on_delete=models.SET_NULL
    )

    is_ccdl = models.BooleanField(default=True)

    # Stats
    time_start = models.DateTimeField(blank=True, null=True)
    time_end = models.DateTimeField(blank=True, null=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResult, self).save(*args, **kwargs)

    def remove_computed_files_from_s3(self):
        """ Removes all associated computed files from S3. Use this before deleting a computational result. """
        for computed_file in self.computedfile_set.all():
            computed_file.delete_s3_file()

    def get_index_length(self):
        """ Pull the index_length from one of the result annotations """
        # Scan this result's annotations for the first one carrying an
        # "index_length" key; returns None if none of them have it.
        annotations = ComputationalResultAnnotation.objects.filter(result=self)
        for annotation_json in annotations:
            if "index_length" in annotation_json.data:
                return annotation_json.data["index_length"]
        return None

    def get_quant_sf_file(self):
        """Return the newest quant.sf ComputedFile uploaded to S3 for this result, or None."""
        return (
            ComputedFile.objects.filter(
                result=self, filename="quant.sf", s3_key__isnull=False, s3_bucket__isnull=False,
            )
            .order_by("-id")
            .first()
        )
class ComputationalResultAnnotation(models.Model):
    """Non-standard information associated with a ComputationalResult.

    The payload lives in the free-form ``data`` JSON field.
    """

    class Meta:
        db_table = "computational_result_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # Properties
    data = JSONField(default=dict)
    is_ccdl = models.BooleanField(default=True)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ComputationalResultAnnotation, self).save(*args, **kwargs)
# Compendium Computational Result
class CompendiumResult(models.Model):
    """Computational Result for a Compendium.

    Links the ComputationalResult produced by a compendium job to the
    organism(s) it covers and records which SVD algorithm (if any) was
    used for imputation.
    """

    class Meta:
        db_table = "compendium_results"
        base_manager_name = "public_objects"

    def __str__(self):
        return "CompendiumResult " + str(self.pk)

    # Valid values for svd_algorithm; "NONE" means no imputation was run.
    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    result = models.ForeignKey(
        ComputationalResult,
        blank=False,
        null=False,
        related_name="compendium_result",
        on_delete=models.CASCADE,
    )
    primary_organism = models.ForeignKey(
        Organism,
        blank=False,
        null=False,
        related_name="primary_compendium_results",
        on_delete=models.CASCADE,
    )
    organisms = models.ManyToManyField(
        Organism, related_name="compendium_results", through="CompendiumResultOrganismAssociation"
    )

    # Properties
    quant_sf_only = models.BooleanField(default=False)
    compendium_version = models.IntegerField(blank=True, null=True)
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="The SVD algorithm that was used to impute the compendium result.",
    )

    # Common Properties
    is_public = models.BooleanField(default=True)

    # helper
    def get_computed_file(self):
        """ Short hand method for getting the computed file for this compendium"""
        return ComputedFile.objects.filter(result=self.result).first()
# TODO
# class Gene(models.Model):
""" A representation of a Gene """
# class Meta:
# db_table = "genes"
class OrganismIndex(models.Model):
    """ A special type of process result, necessary for processing other SRA samples """

    class Meta:
        db_table = "organism_index"
        base_manager_name = "public_objects"

    def __str__(self):
        return (
            "OrganismIndex "
            + str(self.pk)
            + ": "
            + self.organism.name
            + " ["
            + self.index_type
            + "] - "
            + str(self.salmon_version)
        )

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # Relations
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    # ex., "TRANSCRIPTOME_LONG", "TRANSCRIPTOME_SHORT"
    index_type = models.CharField(max_length=255)

    # This corresponds to Ensembl's release number:
    # http://ensemblgenomes.org/info/about/release_cycle
    # Determined by hitting:
    # http://rest.ensembl.org/info/software?content-type=application/json
    source_version = models.CharField(max_length=255, default="93")

    # The name of the genome assembly used which corresponds to 'GRCh38' in:
    # ftp://ftp.ensembl.org/pub/release-93/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
    assembly_name = models.CharField(max_length=255, default="UNKNOWN")

    # This matters, for instance salmon 0.9.0 indexes don't work with 0.10.0
    salmon_version = models.CharField(max_length=255, default=CURRENT_SALMON_VERSION)

    # We keep the directory unextracted on the shared filesystem so all
    # Salmon jobs can access it.
    absolute_directory_path = models.CharField(max_length=255, blank=True, null=True, default="")

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def get_computed_file(self):
        """ Short hand method for getting the computed file for this organism index"""
        return self.result.computedfile_set.first()

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OrganismIndex, self).save(*args, **kwargs)
"""
# Files
These are the database representations of files
which live on local disk, on ephemeral storage,
or on AWS cloud services.
"""
class OriginalFile(models.Model):
    """A representation of a file from an external source.

    Tracks the file's download state, local path, S3 location, and its
    associations to Samples and to the downloader/processor jobs that
    handle it.
    """

    class Meta:
        db_table = "original_files"
        indexes = [
            models.Index(fields=["filename"]),
            models.Index(fields=["source_filename"]),
        ]

    def __str__(self):
        return "OriginalFile: " + self.get_display_name()

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()

    # File Properties
    filename = models.CharField(max_length=255)
    absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
    size_in_bytes = models.BigIntegerField(blank=True, null=True)
    sha1 = models.CharField(max_length=64)

    # AWS
    s3_bucket = models.CharField(max_length=255, blank=True, null=True)
    s3_key = models.CharField(max_length=255, blank=True, null=True)

    # Relations
    samples = models.ManyToManyField("Sample", through="OriginalFileSampleAssociation")
    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobOriginalFileAssociation"
    )
    downloader_jobs = models.ManyToManyField(
        "data_refinery_common.DownloaderJob", through="DownloaderJobOriginalFileAssociation"
    )

    # Historical Properties
    source_url = models.TextField()
    is_archive = models.BooleanField(default=True)
    source_filename = models.CharField(max_length=255, blank=False)

    # Scientific Properties
    has_raw = models.BooleanField(default=True)  # Did this sample have a raw data source?

    # Crunch Properties
    is_downloaded = models.BooleanField(default=False)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save: stamp the creation time as well.
            self.created_at = current_time
        self.last_modified = current_time
        return super(OriginalFile, self).save(*args, **kwargs)

    def set_downloaded(self, absolute_file_path, filename=None):
        """ Marks the file as downloaded, if `filename` is not provided it will
        be parsed from the `absolute_file_path` """
        self.is_downloaded = True
        self.is_archive = FileUtils.is_archive(absolute_file_path)
        self.absolute_file_path = absolute_file_path
        self.filename = filename if filename else os.path.basename(absolute_file_path)
        self.calculate_size()
        self.calculate_sha1()
        self.save()

    def calculate_sha1(self) -> str:
        """ Calculate the SHA1 value of a given file.
        """
        # Stores the digest on the instance and returns it (does not save).
        self.sha1 = calculate_sha1(self.absolute_file_path)
        return self.sha1

    def calculate_size(self) -> int:
        """ Calculate the number of bytes in a given file.
        """
        # Stores the size on the instance and returns it (does not save).
        self.size_in_bytes = calculate_file_size(self.absolute_file_path)
        return self.size_in_bytes

    def get_display_name(self):
        """ For dev convenience """
        if not self.filename:
            return self.source_filename
        else:
            return self.filename

    def get_extension(self):
        """ Returns the lowercased extension of the filename
        Thanks to https://stackoverflow.com/a/541408/763705 """
        return FileUtils.get_extension(self.filename)

    def is_blacklisted(self):
        """Return True if this file's extension is one we never process."""
        return self.get_extension() in [".xml", ".chp", ".exp"]

    def delete_local_file(self):
        """ Deletes this file from the local file system."""
        try:
            os.remove(self.absolute_file_path)
        except OSError:
            # File already gone; nothing to do.
            pass
        except TypeError:
            # absolute_file_path was None.
            pass
        except Exception as e:
            logger.exception(
                "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
            )
        self.is_downloaded = False
        self.save()

    def has_blocking_jobs(self, own_processor_id=None) -> bool:
        """Return True if an existing processor job should block a new processing attempt."""
        # If the file has a processor job that should not have been
        # retried, then it still shouldn't be retried.
        # Exclude the ones that were aborted.
        no_retry_processor_jobs = self.processor_jobs.filter(no_retry=True).exclude(abort=True)

        # If the file has a processor job that hasn't even started
        # yet, then it doesn't need another.
        incomplete_processor_jobs = self.processor_jobs.filter(
            end_time__isnull=True, success__isnull=True, retried=False
        )

        if own_processor_id:
            incomplete_processor_jobs = incomplete_processor_jobs.exclude(id=own_processor_id)

        # Check if there's any jobs which should block another
        # processing attempt.
        blocking_jobs = no_retry_processor_jobs | incomplete_processor_jobs

        return blocking_jobs.first() is not None

    def needs_processing(self, own_processor_id=None) -> bool:
        """Returns False if original_file has been or is being processed.

        Returns True otherwise.

        If own_processor_id is supplied then it will be ignored so
        that processor jobs can use this function without their job
        being counted as currently processing this file.
        """
        sample = self.samples.first()
        if not sample:
            return True

        if self.has_blocking_jobs(own_processor_id):
            return False

        if sample.source_database == "SRA":
            computed_file = sample.get_most_recent_smashable_result_file()

            # If there's no smashable file then we should check the quant.sf file.
            if not computed_file:
                computed_file = sample.get_most_recent_quant_sf_file()

            # If there's neither a quant.sf file nor a smashable file
            # then we definitely need to process it.
            if not computed_file:
                return True

            if (
                computed_file.s3_bucket
                and computed_file.s3_key
                and computed_file.result.organism_index is not None
                and computed_file.result.organism_index.salmon_version == CURRENT_SALMON_VERSION
            ):
                # If the file wasn't computed with the latest
                # version of salmon, then it should be rerun
                # with the latest version of salmon.
                return False
        else:
            # If this original_file has multiple samples (is an
            # archive), and any of them haven't been processed, we'll
            # need the entire archive in order to process any of them.
            # A check to not re-process the already processed
            # samples in the archive will happen elsewhere before
            # dispatching.
            for sample in self.samples.all():
                if not sample.is_processed:
                    return True

                computed_file = sample.get_most_recent_smashable_result_file()
                if not computed_file:
                    return True

                if settings.RUNNING_IN_CLOUD and (
                    computed_file.s3_bucket is None or computed_file.s3_key is None
                ):
                    return True

            return False

        # If we aren't sure, prefer reprocessing over never processing.
        return True

    def needs_downloading(self, own_processor_id=None) -> bool:
        """Determine if a file needs to be downloaded.

        This is true if the file has already been downloaded and lost
        without getting processed.
        """
        # If the file is downloaded and the file actually exists on disk,
        # then it doesn't need to be downloaded.
        if self.absolute_file_path and os.path.exists(self.absolute_file_path):
            return False

        unstarted_downloader_jobs = self.downloader_jobs.filter(
            start_time__isnull=True, success__isnull=True, retried=False
        )

        # If the file has a downloader job that hasn't even started yet,
        # then it doesn't need another.
        if unstarted_downloader_jobs.count() > 0:
            return False

        # If this file has been processed, then it doesn't need to be downloaded again.
        return self.needs_processing(own_processor_id)

    def is_affy_data(self) -> bool:
        """Return true if original_file is a CEL file or a gzipped CEL file.
        """
        upper_name = self.source_filename.upper()
        return (len(upper_name) > 4 and upper_name[-4:] == ".CEL") or (
            len(upper_name) > 7 and upper_name[-7:] == ".CEL.GZ"
        )
class ComputedFile(models.Model):
""" A representation of a file created by a data-refinery process """
class Meta:
db_table = "computed_files"
get_latest_by = "created_at"
indexes = [
models.Index(fields=["filename"]),
]
def __str__(self):
return "ComputedFile: " + str(self.filename)
SVD_ALGORITHM_CHOICES = (
("NONE", "None"),
("RANDOMIZED", "randomized"),
("ARPACK", "arpack"),
)
# Managers
objects = models.Manager()
public_objects = PublicObjectsManager()
# Object relations
samples = models.ManyToManyField("Sample", through="SampleComputedFileAssociation")
# File related
filename = models.CharField(max_length=255)
absolute_file_path = models.CharField(max_length=255, blank=True, null=True)
# TODO: make this work w/ migrations:
# absolute_file_path = models.CharField(max_length=255)
size_in_bytes = models.BigIntegerField()
sha1 = models.CharField(max_length=64)
# Relations
result = models.ForeignKey(
ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
)
# Scientific
is_smashable = models.BooleanField(default=False)
is_qc = models.BooleanField(default=False)
is_qn_target = models.BooleanField(default=False)
# Compendia details
quant_sf_only = models.BooleanField(default=False)
is_compendia = models.BooleanField(default=False)
svd_algorithm = models.CharField(
max_length=255,
choices=SVD_ALGORITHM_CHOICES,
default="NONE",
help_text="The SVD algorithm that was used to generate the file.",
)
compendia_organism = models.ForeignKey(
Organism, blank=True, null=True, on_delete=models.CASCADE
)
compendia_version = models.IntegerField(blank=True, null=True)
# AWS
s3_bucket = models.CharField(max_length=255, blank=True, null=True)
s3_key = models.CharField(max_length=255, blank=True, null=True)
# Common Properties
is_public = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
last_modified = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
""" On save, update timestamps """
current_time = timezone.now()
if not self.id:
self.created_at = current_time
self.last_modified = current_time
return super(ComputedFile, self).save(*args, **kwargs)
def sync_to_s3(self, s3_bucket=None, s3_key=None) -> bool:
    """Upload this file's local copy to S3 at (s3_bucket, s3_key).

    Returns True on success, and True without doing anything when not
    running in the cloud.  On success the new location is persisted via
    save(); on failure the in-memory s3_bucket/s3_key are reset to None
    (but NOT saved) and False is returned.
    """
    if not settings.RUNNING_IN_CLOUD:
        return True

    # Record the destination up front; save() below only runs after a
    # successful upload, so the DB never points at an object that isn't there.
    self.s3_bucket = s3_bucket
    self.s3_key = s3_key

    try:
        S3.upload_file(
            self.absolute_file_path,
            s3_bucket,
            s3_key,
            ExtraArgs={"ACL": "public-read", "StorageClass": "STANDARD_IA"},
        )
        self.save()
    except Exception as e:
        logger.exception(
            "Error uploading computed file to S3",
            computed_file_id=self.pk,
            s3_key=self.s3_key,
            s3_bucket=self.s3_bucket,
        )
        self.s3_bucket = None
        self.s3_key = None
        return False

    return True
def sync_from_s3(self, force=False, path=None):
    """Download this file from S3 to the local file system.

    Returns the local file path on success, or None on failure.  Outside
    the cloud (unless `force` is True) no S3 call is made: an existing
    local copy is returned or copied into place instead.
    """
    path = path if path is not None else self.absolute_file_path

    if not settings.RUNNING_IN_CLOUD and not force:
        if os.path.exists(path):
            return path
        else:
            # If the file doesn't exist at path and we're not
            # running in the cloud, then the file is almost
            # certainly at its absolute_file_path because it never got deleted.
            if os.path.exists(self.absolute_file_path):
                shutil.copyfile(self.absolute_file_path, path)
                return path
            else:
                # We don't have the file :(
                return None

    target_directory = os.path.dirname(path)
    os.makedirs(target_directory, exist_ok=True)

    if not self.s3_bucket or not self.s3_key:
        raise ValueError("Tried to download a computed file with no s3_bucket or s3_key")

    try:
        S3.download_file(self.s3_bucket, self.s3_key, path)

        # Verify sync integrity: the download must match the SHA1 recorded
        # in the database (module-level calculate_sha1, not the method).
        synced_sha1 = calculate_sha1(path)

        if self.sha1 != synced_sha1:
            raise AssertionError("SHA1 of downloaded ComputedFile doesn't match database SHA1!")

        return path
    except Exception as e:
        logger.exception(e, computed_file_id=self.pk)
        return None
def change_s3_location(self, new_bucket: str, new_key: str) -> bool:
    """Move this file's S3 object to (new_bucket, new_key).

    Performed as copy -> save -> delete-old so the object is never missing
    from both locations.  On success s3_bucket/s3_key are updated and
    persisted, and True is returned.  Any failure is logged and False is
    returned; failing after the copy leaves a duplicate object in S3
    rather than losing data.
    """
    old_bucket = self.s3_bucket
    old_key = self.s3_key
    copy_source = {"Bucket": old_bucket, "Key": old_key}
    try:
        S3.copy_object(Bucket=new_bucket, CopySource=copy_source, Key=new_key)
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
    except Exception:
        logger.exception(
            "Could not copy computed file within S3",
            computed_file_id=self.id,
            source_bucket=old_bucket,
            source_key=old_key,
            destination_bucket=new_bucket,
            destination_key=new_key,
        )
        return False

    try:
        self.s3_bucket = new_bucket
        self.s3_key = new_key
        self.save()
    except Exception:
        logger.exception(
            "Could not save computed file after it was copied!!!",
            computed_file_id=self.id,
            source_bucket=old_bucket,
            source_key=old_key,
            destination_bucket=new_bucket,
            destination_key=new_key,
        )
        return False

    try:
        S3.delete_object(Bucket=old_bucket, Key=old_key)
    except Exception:
        logger.exception(
            "Could not delete computed file after it was copied and saved!!!",
            computed_file_id=self.id,
            source_bucket=old_bucket,
            source_key=old_key,
            destination_bucket=new_bucket,
            destination_key=new_key,
        )
        return False

    return True
def calculate_sha1(self) -> str:
    """Compute, store, and return the SHA1 hex digest of the local file.

    Fixed return annotation: this method returns the digest string, not
    None.  Delegates to the module-level calculate_sha1 helper.
    """
    self.sha1 = calculate_sha1(self.absolute_file_path)
    return self.sha1
def calculate_size(self) -> int:
    """Compute, store, and return the file's size in bytes.

    Fixed return annotation: this method returns the byte count, not None.
    """
    self.size_in_bytes = calculate_file_size(self.absolute_file_path)
    return self.size_in_bytes
def delete_local_file(self, force=False):
    """Remove the file at absolute_file_path from the local file system.

    Outside the cloud this is a no-op unless `force` is True.  A missing
    file (OSError) or an unset path (TypeError from os.remove(None)) is
    ignored silently; anything else is logged.
    """
    if not settings.RUNNING_IN_CLOUD and not force:
        return
    try:
        os.remove(self.absolute_file_path)
    except (OSError, TypeError):
        # Already gone, or absolute_file_path is None — nothing to do.
        pass
    except Exception:
        logger.exception(
            "Unexpected delete file exception.", absolute_file_path=self.absolute_file_path
        )
def delete_s3_file(self, force=False):
    """Delete this file's object from S3.

    Outside the cloud this is a no-op (returns False) unless `force` is
    True.  On success the s3_bucket/s3_key fields are cleared and
    persisted, and True is returned.
    """
    # If we're not running in the cloud then we shouldn't try to
    # delete something from S3 unless force is set.
    if not settings.RUNNING_IN_CLOUD and not force:
        return False

    try:
        S3.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
    except Exception:
        logger.exception(
            "Failed to delete S3 object for Computed File.",
            computed_file=self.id,
            s3_object=self.s3_key,
        )
        return False

    self.s3_key = None
    self.s3_bucket = None
    self.save()
    return True
def get_synced_file_path(self, force=False, path=None):
    """Return a local path to this ComputedFile, fetching from S3 if it
    isn't already available locally."""
    target = path if path else self.absolute_file_path
    if os.path.exists(target):
        return target
    # Preserve the original semantics: a falsy `path` means "use the
    # default absolute_file_path" inside sync_from_s3 as well.
    return self.sync_from_s3(force, path if path else None)
@property
def s3_url(self):
    """Public HTTPS URL of the S3 object (None when not uploaded)."""
    return self.get_s3_url()
def get_s3_url(self):
    """Build the public HTTPS URL for the S3 object, or None when the
    file has no recorded S3 location."""
    if not (self.s3_key and self.s3_bucket):
        return None
    return "https://s3.amazonaws.com/{0}/{1}".format(self.s3_bucket, self.s3_key)
@property
def download_url(self):
    """A temporary (presigned) URL from which the file can be downloaded."""
    return self.create_download_url()
def create_download_url(self):
    """Create a temporary presigned GET URL for this file.

    Returns None outside the cloud or when the file has no S3 location.
    """
    if not (settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key):
        return None
    seven_days_in_seconds = 60 * 60 * 24 * 7
    return S3.generate_presigned_url(
        ClientMethod="get_object",
        Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
        ExpiresIn=seven_days_in_seconds,
    )
def has_been_log2scaled(self):
    """Return True if this is a smashable file that has been log2 scaled."""
    if not self.is_smashable:
        return False
    return self.filename.endswith("lengthScaledTPM.tsv")
class Dataset(models.Model):
    """A user-requested set of experiments/samples to smash and download.

    The `data` field maps experiment accession codes to lists of sample
    accession codes.  Processing options, lifecycle flags, delivery
    details, and the location of the resulting archive all live here.
    """

    AGGREGATE_CHOICES = (("ALL", "All"), ("EXPERIMENT", "Experiment"), ("SPECIES", "Species"))
    SCALE_CHOICES = (
        ("NONE", "None"),
        ("MINMAX", "Minmax"),
        ("STANDARD", "Standard"),
        ("ROBUST", "Robust"),
    )
    SVD_ALGORITHM_CHOICES = (
        ("NONE", "None"),
        ("RANDOMIZED", "randomized"),
        ("ARPACK", "arpack"),
    )
    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Experiments and samples live here: {'E-ABC-1': ['SAMP1', 'SAMP2']}
    # This isn't going to be queryable, so we can use JSON-in-text, just make
    # sure we validate properly in and out!
    data = JSONField(
        default=dict,
        help_text="This is a dictionary where the keys are experiment accession codes and the values are lists with sample accession codes. Eg: `{'E-ABC-1': ['SAMP1', 'SAMP2']}`",
    )
    # Processing properties
    aggregate_by = models.CharField(
        max_length=255,
        choices=AGGREGATE_CHOICES,
        default="EXPERIMENT",
        help_text="Specifies how samples are [aggregated](http://docs.refine.bio/en/latest/main_text.html#aggregations).",
    )
    scale_by = models.CharField(
        max_length=255,
        choices=SCALE_CHOICES,
        default="NONE",
        help_text="Specifies options for [transformations](http://docs.refine.bio/en/latest/main_text.html#transformations).",
    )
    quantile_normalize = models.BooleanField(
        default=True,
        help_text="Part of the advanced options. Allows [skipping quantile normalization](http://docs.refine.bio/en/latest/faq.html#what-does-it-mean-to-skip-quantile-normalization-for-rna-seq-samples) for RNA-Seq samples.",
    )
    quant_sf_only = models.BooleanField(
        default=False, help_text="Include only quant.sf files in the generated dataset."
    )
    svd_algorithm = models.CharField(
        max_length=255,
        choices=SVD_ALGORITHM_CHOICES,
        default="NONE",
        help_text="Specifies choice of SVD algorithm",
    )
    # State properties
    is_processing = models.BooleanField(default=False)  # Data is still editable when False
    is_processed = models.BooleanField(default=False)  # Result has been made
    is_available = models.BooleanField(default=False)  # Result is ready for delivery
    processor_jobs = models.ManyToManyField(
        "data_refinery_common.ProcessorJob", through="ProcessorJobDataSetAssociation"
    )
    # Fail handling
    success = models.NullBooleanField(null=True)
    failure_reason = models.TextField()
    # Delivery properties
    email_address = models.CharField(max_length=255, blank=True, null=True)
    email_ccdl_ok = models.BooleanField(default=False)
    expires_on = models.DateTimeField(blank=True, null=True)
    # Deliverables
    s3_bucket = models.CharField(max_length=255)
    s3_key = models.CharField(max_length=255)
    size_in_bytes = models.BigIntegerField(
        blank=True,
        null=True,
        default=0,
        help_text="Contains the size in bytes of the processed dataset.",
    )
    sha1 = models.CharField(max_length=64, null=True, default="")
    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On first save set created_at; always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(Dataset, self).save(*args, **kwargs)

    def get_samples(self):
        """Returns all of the Sample objects in this Dataset."""
        all_samples = []
        for sample_list in self.data.values():
            all_samples = all_samples + sample_list
        all_samples = list(set(all_samples))
        return Sample.objects.filter(accession_code__in=all_samples)

    def get_total_samples(self):
        """Returns the total number of samples; this counts the number of
        unique sample accession codes in `data`."""
        return len(
            set(
                [
                    accession_code
                    for experiment in self.data.values()
                    for accession_code in experiment
                ]
            )
        )

    def get_experiments(self):
        """Returns all of the Experiment objects in this Dataset."""
        all_experiments = self.data.keys()
        return Experiment.objects.filter(accession_code__in=all_experiments)

    def get_samples_by_experiment(self):
        """Returns a dict of sample QuerySets, for samples grouped by experiment."""
        all_samples = {}
        for experiment, samples in self.data.items():
            all_samples[experiment] = Sample.objects.filter(accession_code__in=samples)
        return all_samples

    def get_samples_by_species(self):
        """Returns a dict of sample lists, keyed by organism name."""
        by_species = {}
        all_samples = self.get_samples()
        for sample in all_samples:
            if not by_species.get(sample.organism.name, None):
                by_species[sample.organism.name] = [sample]
            else:
                by_species[sample.organism.name].append(sample)
        return by_species

    def get_aggregated_samples(self):
        """Uses aggregate_by to return a smasher-ready sample dict."""
        if self.aggregate_by == "ALL":
            return {"ALL": self.get_samples()}
        elif self.aggregate_by == "EXPERIMENT":
            return self.get_samples_by_experiment()
        else:
            return self.get_samples_by_species()

    def is_cross_technology(self):
        """Determine if this involves both Microarray + RNASeq."""
        if len(self.get_samples().values("technology").distinct()) > 1:
            return True
        else:
            return False

    @property
    def download_url(self):
        """A temporary URL from which the file can be downloaded."""
        return self.create_download_url()

    def create_download_url(self):
        """Create a temporary presigned URL (valid 7 days) for the dataset
        archive, or None outside the cloud / without an S3 location."""
        if settings.RUNNING_IN_CLOUD and self.s3_bucket and self.s3_key:
            return S3.generate_presigned_url(
                ClientMethod="get_object",
                Params={"Bucket": self.s3_bucket, "Key": self.s3_key},
                ExpiresIn=(60 * 60 * 7 * 24),  # 7 days in seconds.
            )
        else:
            return None

    # NOTE(review): unlike ComputedFile.s3_url this is a plain method, not a
    # @property — callers must invoke it.  Confirm before unifying the APIs.
    def s3_url(self):
        """Render the resulting S3 URL."""
        if (self.s3_key) and (self.s3_bucket):
            return "https://s3.amazonaws.com/" + self.s3_bucket + "/" + self.s3_key
        else:
            return None

    @property
    def has_email(self):
        """Returns whether an email address is set."""
        return bool(self.email_address)
class APIToken(models.Model):
    """A token required for starting a smash job.

    Presumably a token is usable only once `is_activated` is True (terms
    accepted) — confirm against the API views that consume it.
    """

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Activation
    is_activated = models.BooleanField(default=False)
    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On first save set created_at; always refresh last_modified."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(APIToken, self).save(*args, **kwargs)

    @property
    def terms_and_conditions(self):
        """The terms-and-conditions text configured in Django settings."""
        return settings.TERMS_AND_CONDITIONS
"""
# Associations
These represent the relationships between items in the other tables.
"""
class ExperimentSampleAssociation(models.Model):
    """Join table linking an Experiment to one of its Samples."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_sample_associations"
        unique_together = ("experiment", "sample")
class ExperimentOrganismAssociation(models.Model):
    """Join table linking an Experiment to an Organism it contains."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_organism_associations"
        unique_together = ("experiment", "organism")
class DownloaderJobOriginalFileAssociation(models.Model):
    """Join table linking a DownloaderJob to an OriginalFile it downloads."""

    downloader_job = models.ForeignKey(
        "data_refinery_common.DownloaderJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "downloaderjob_originalfile_associations"
        unique_together = ("downloader_job", "original_file")
class ProcessorJobOriginalFileAssociation(models.Model):
    """Join table linking a ProcessorJob to an OriginalFile it processes."""

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "processorjob_originalfile_associations"
        unique_together = ("processor_job", "original_file")
class ProcessorJobDatasetAssociation(models.Model):
    """Join table linking a ProcessorJob to the Dataset it smashes."""

    processor_job = models.ForeignKey(
        "data_refinery_common.ProcessorJob", blank=False, null=False, on_delete=models.CASCADE
    )
    dataset = models.ForeignKey(Dataset, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "processorjob_dataset_associations"
        # NOTE(review): unlike the sibling association tables, there is no
        # unique_together here, so duplicate (job, dataset) rows are
        # possible — confirm this is intentional.
class OriginalFileSampleAssociation(models.Model):
    """Join table linking an OriginalFile to a Sample it belongs to."""

    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )
    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "original_file_sample_associations"
        unique_together = ("original_file", "sample")
class SampleResultAssociation(models.Model):
    """Join table linking a Sample to a ComputationalResult."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_result_associations"
        unique_together = ("result", "sample")
class SampleComputedFileAssociation(models.Model):
    """Join table linking a Sample to a ComputedFile derived from it."""

    sample = models.ForeignKey(Sample, blank=False, null=False, on_delete=models.CASCADE)
    computed_file = models.ForeignKey(
        ComputedFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "sample_computed_file_associations"
        unique_together = ("sample", "computed_file")
class ExperimentResultAssociation(models.Model):
    """Join table linking an Experiment to a ComputationalResult."""

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "experiment_result_associations"
        unique_together = ("result", "experiment")
class CompendiumResultOrganismAssociation(models.Model):
    """Join table linking a CompendiumResult to an Organism it covers."""

    compendium_result = models.ForeignKey(
        CompendiumResult, blank=False, null=False, on_delete=models.CASCADE
    )
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "compendium_result_organism_associations"
        unique_together = ("compendium_result", "organism")
<|code_end|>
common/data_refinery_common/rna_seq.py
<|code_start|>from typing import Dict, List
from django.db.models import OuterRef, Subquery
from data_refinery_common.job_lookup import ProcessorEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputationalResult, ComputedFile, Experiment
from data_refinery_common.utils import get_env_variable
logger = get_and_configure_logger(__name__)
# Some experiments won't be entirely processed, but we'd still like to
# make the samples we can process available. This means we need to run
# tximport on the experiment before 100% of the samples are processed
# individually.
# This idea has been discussed here: https://github.com/AlexsLemonade/refinebio/issues/909
# The consensus is that this is a good idea, but that we need a cutoff
# to determine which experiments have enough data to have tximport run
# on them early. Candace ran an experiment to find these cutoff
# values and recorded the results of this experiment here:
# https://github.com/AlexsLemonade/tximport_partial_run_tests/pull/3
# The gist of that discussion/experiment is that we need two cutoff
# values, one for a minimum size experiment that can be processed
# early and the percentage of completion necessary before we can
# run tximport on the experiment. The values we decided on are:
# Cutoffs for running tximport before 100% of samples are processed; see
# the discussion/experiment linked in the comments above.
EARLY_TXIMPORT_MIN_SIZE = 25  # minimum number of eligible samples
EARLY_TXIMPORT_MIN_PERCENT = 0.80  # minimum fraction already quantified
def should_run_tximport(experiment: Experiment, results, is_tximport_job: bool):
    """Decide whether `experiment` is eligible to have tximport run on it.

    `results` is a queryset of ComputationalResults for the samples that
    had salmon quant run on them.  Fully-quantified experiments always
    qualify; tximport jobs may also run early on experiments that are both
    large enough and mostly complete.
    """
    quantified_count = results.count()
    if quantified_count == 0:
        return False

    # Tximport requires that all samples were processed with the same
    # salmon version: https://github.com/AlexsLemonade/refinebio/issues/1496
    salmon_version_count = (
        results.filter(organism_index__salmon_version__isnull=False)
        .values_list("organism_index__salmon_version")
        .distinct()
        .count()
    )
    if salmon_version_count > 1:
        return False

    eligible_count = experiment.samples.filter(
        source_database="SRA", technology="RNA-SEQ"
    ).count()
    if eligible_count == 0:
        return False

    completion = quantified_count / eligible_count
    if completion == 1.0:
        # Fully quantified experiments get tximport regardless of size.
        return True

    # Early tximport: only for tximport jobs on big, mostly-complete experiments.
    return bool(
        is_tximport_job
        and eligible_count >= EARLY_TXIMPORT_MIN_SIZE
        and completion >= EARLY_TXIMPORT_MIN_PERCENT
    )
def get_quant_results_for_experiment(experiment: Experiment, filter_old_versions=True):
    """Return a queryset of the newest salmon quant result per sample of `experiment`."""
    # Salmon version gets saved as what salmon outputs, which includes the
    # "salmon " prefix.
    current_salmon_version = "salmon " + get_env_variable("SALMON_VERSION", "0.13.1")

    if filter_old_versions:
        candidate_results = ComputationalResult.objects.prefetch_related("organism_index").filter(
            organism_index__salmon_version=current_salmon_version
        )
    else:
        candidate_results = ComputationalResult.objects.all()

    # A result is only eligible to be used if its file actually got uploaded.
    candidate_results = candidate_results.select_related("computedfile").filter(
        computedfile__s3_bucket__isnull=False, computedfile__s3_key__isnull=False
    )

    # Subquery: for each sample (referenced from the outer query) find its
    # most recent salmon quant result.
    # https://docs.djangoproject.com/en/2.2/ref/models/expressions/#subquery-expressions
    latest_per_sample = candidate_results.filter(
        samples=OuterRef("id"), processor__name=ProcessorEnum.SALMON_QUANT.value["name"],
    ).order_by("-created_at")

    # Annotate each sample with the id of its newest result, then collect
    # those ids.
    result_ids = (
        experiment.samples.all()
        .annotate(
            latest_computational_result_id=Subquery(latest_per_sample.values("id")[:1])
        )
        .filter(latest_computational_result_id__isnull=False)
        .values_list("latest_computational_result_id", flat=True)
    )

    # Return the computational results that match those ids.
    return ComputationalResult.objects.all().filter(id__in=result_ids)
def get_quant_files_for_results(results: List[ComputationalResult]):
    """Collect the quant.sf ComputedFile for each salmon quant result.

    Raises when any result has no quant.sf file, after logging which
    result/sample was affected.
    """
    collected = []
    for quant_result in results:
        computed_file = quant_result.get_quant_sf_file()
        if not computed_file:
            logger.exception(
                "Salmon quant result found without quant.sf ComputedFile!",
                quant_result=quant_result.id,
                sample=quant_result.samples.first(),
            )
            raise Exception("Salmon quant result found without quant.sf ComputedFile!")
        collected.append(computed_file)
    return collected
ENA_DOWNLOAD_URL_TEMPLATE = (
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/{short_accession}{sub_dir}"
"/{long_accession}/{long_accession}{read_suffix}.fastq.gz"
)
ENA_SUB_DIR_PREFIX = "/00"
def _build_ena_file_url(run_accession: str, read_suffix=""):
# ENA has a weird way of nesting data: if the run accession is
# greater than 9 characters long then there is an extra
# sub-directory in the path which is "00" + the last digit of
# the run accession.
sub_dir = ""
if len(run_accession) > 9:
sub_dir = ENA_SUB_DIR_PREFIX + run_accession[-1]
return ENA_DOWNLOAD_URL_TEMPLATE.format(
short_accession=run_accession[:6],
sub_dir=sub_dir,
long_accession=run_accession,
read_suffix=read_suffix,
)
<|code_end|>
workers/data_refinery_workers/processors/salmon.py
<|code_start|>import glob
import io
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import tarfile
from typing import Dict, List
from django.conf import settings
from django.db import transaction
from django.utils import timezone
import boto3
import numpy as np
import pandas as pd
import untangle
from botocore.client import Config
from data_refinery_common.job_lookup import Downloaders, PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Experiment,
ExperimentSampleAssociation,
OrganismIndex,
Pipeline,
Processor,
Sample,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.rna_seq import (
get_quant_files_for_results,
get_quant_results_for_experiment,
should_run_tximport,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client("s3", config=Config(signature_version="s3v4"))
logger = get_and_configure_logger(__name__)
# Per-job working directories are named LOCAL_ROOT_DIR/processor_job_<id>
# (see _set_job_prefix / _prepare_files below).
JOB_DIR_PREFIX = "processor_job_"
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
def _set_job_prefix(job_context: Dict) -> Dict:
    """Store this job's unique working-directory prefix on the context."""
    job_context["job_dir_prefix"] = "{0}{1}".format(JOB_DIR_PREFIX, job_context["job_id"])
    return job_context
def _prepare_files(job_context: Dict) -> Dict:
    """Moves the file(s) from the raw directory to the temp directory.

    Also adds the keys "input_file_path" and "output_directory" to
    job_context so everything is prepared for processing. If the reads
    are paired then there will also be an "input_file_path_2" key
    added to job_context for the second read.  On any validation failure
    the job is marked unsuccessful and the context returned early.
    """
    logger.debug("Preparing files..")

    # Create a directory specific to this processor job combo.
    # (A single sample could belong to multiple experiments, meaning
    # that it could be run more than once, potentially even at the
    # same time.)
    job_context["work_dir"] = os.path.join(LOCAL_ROOT_DIR, job_context["job_dir_prefix"]) + "/"
    os.makedirs(job_context["work_dir"], exist_ok=True)

    original_files = job_context["original_files"]
    job_context["input_file_path"] = original_files[0].absolute_file_path
    if not os.path.exists(job_context["input_file_path"]):
        logger.error(
            "Was told to process a non-existent file - why did this happen?",
            input_file_path=job_context["input_file_path"],
            processor_job=job_context["job_id"],
        )
        job_context["job"].failure_reason = "Missing input file: " + str(
            job_context["input_file_path"]
        )
        job_context["success"] = False
        return job_context

    # Two original files means paired-end reads.
    if len(original_files) == 2:
        job_context["input_file_path_2"] = original_files[1].absolute_file_path
        if not os.path.exists(job_context["input_file_path_2"]):
            logger.error(
                "Was told to process a non-existent file2 - why did this happen?",
                input_file_path=job_context["input_file_path_2"],
                processor_job=job_context["job_id"],
            )
            job_context["job"].failure_reason = "Missing input file2: " + str(
                job_context["input_file_path_2"]
            )
            job_context["success"] = False
            return job_context

    # There should only ever be one per Salmon run
    sample = job_context["original_files"][0].samples.first()

    # This check was added to ensure that we don't process any RNA-Seq
    # samples from GEO, but for the time being we really don't want to
    # run salmon on anything that's not from SRA. See
    # https://github.com/AlexsLemonade/refinebio/issues/966 for more
    # information.
    if sample.technology != "RNA-SEQ" or sample.source_database != "SRA":
        failure_reason = (
            "The sample for this job either was not RNA-Seq or was not from the " "SRA database."
        )
        job_context["failure_reason"] = failure_reason
        logger.error(failure_reason, sample=sample, processor_job=job_context["job_id"])
        # No need to retry and fail more than once for this reason.
        job_context["success"] = False
        job_context["job"].failure_reason = failure_reason
        job_context["job"].no_retry = True
        return job_context

    # Detect that this is an SRA file from the source URL; SRA archives are
    # copied into the work dir and tracked under "sra_input_file_path".
    if ("ncbi.nlm.nih.gov" in job_context["original_files"][0].source_url) or (
        job_context["input_file_path"][-4:].upper() == ".SRA"
    ):
        new_input_file_path = os.path.join(job_context["work_dir"], original_files[0].filename)
        shutil.copyfile(job_context["input_file_path"], new_input_file_path)
        job_context["input_file_path"] = new_input_file_path
        job_context["sra_input_file_path"] = new_input_file_path

        if job_context.get("input_file_path_2", False):
            new_input_file_path = os.path.join(job_context["work_dir"], original_files[1].filename)
            shutil.copyfile(job_context["input_file_path_2"], new_input_file_path)
            job_context["input_file_path_2"] = new_input_file_path

    job_context["sample_accession_code"] = sample.accession_code
    job_context["sample"] = sample
    job_context["samples"] = []  # This will only be populated in the `tximport` job
    job_context["organism"] = job_context["sample"].organism
    job_context["success"] = True

    # Pre-create the output locations salmon and salmontools will write to.
    job_context["output_directory"] = job_context["work_dir"] + sample.accession_code + "_output/"
    os.makedirs(job_context["output_directory"], exist_ok=True)
    job_context["salmontools_directory"] = job_context["work_dir"] + "salmontools/"
    os.makedirs(job_context["salmontools_directory"], exist_ok=True)
    job_context["salmontools_archive"] = job_context["work_dir"] + "salmontools-result.tar.gz"
    # Unix-seconds timestamp (fractional part stripped) for the archive name.
    timestamp = str(timezone.now().timestamp()).split(".")[0]
    job_context["output_archive"] = job_context["work_dir"] + "result-" + timestamp + ".tar.gz"
    job_context["computed_files"] = []
    job_context["smashable_files"] = []
    return job_context
def _determine_index_length_sra(job_context: Dict) -> Dict:
    """
    Use the sra-stat tool to determine length
    ex:
    sra-stat -x --statistics ERR1562482.sra

    Sets "index_length_raw" (average read length), "index_length"
    ("short"/"long"), and "sra_num_reads" (1 for single-end, 2 for paired)
    on the context.  On failure the job is marked unsuccessful.
    """
    command_str = "sra-stat -x --statistics {sra_file}"
    formatted_command = command_str.format(sra_file=job_context["sra_input_file_path"])
    logger.debug(
        "Running sra-stat using the following shell command: %s",
        formatted_command,
        processor_job=job_context["job_id"],
    )

    completed_command = subprocess.run(
        formatted_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )

    respo = completed_command.stdout.decode().strip()
    try:
        stats = untangle.parse(respo)
    except ValueError:
        # NOTE(review): `stats` stays unbound here; the try blocks below
        # will then fall through their excepts to the -1 default.
        logger.error(
            "Unable to parse sra-stat output!", respo=str(respo), command=formatted_command
        )

    # Different SRA files can create different output formats, somehow.
    # This mess tries every output method we can to parse these stats.
    # If it's so messed up we don't know, default to short.
    try:
        bases_count = int(stats.Run.Bases["count"])
        reads_count = int(stats.Run.Statistics["nspots"]) * int(stats.Run.Statistics["nreads"])
        job_context["sra_num_reads"] = int(stats.Run.Statistics["nreads"])
        job_context["index_length_raw"] = int(bases_count / reads_count)
    except Exception:
        try:
            job_context["sra_num_reads"] = int(stats.Run.Statistics["nreads"])
            spot_count_mates = int(stats.Run["spot_count_mates"])
            base_count_bio_mates = int(stats.Run["base_count_bio_mates"])
            reads_count = spot_count_mates * int(stats.Run.Statistics["nreads"])
            job_context["index_length_raw"] = int(base_count_bio_mates / reads_count)
        except Exception:
            try:
                job_context["index_length_raw"] = int(stats.Run.Statistics.Read[0]["average"])
            except Exception:
                # sra-stat will sometimes put warnings in the XML stream, so we end up with nothing valid to parse.
                # https://github.com/ncbi/sra-tools/issues/192
                logger.error(
                    "Unable to determine index length! Defaulting to small",
                    file=job_context["sra_input_file_path"],
                )
                job_context["index_length_raw"] = -1

    if job_context["index_length_raw"] > 75:
        job_context["index_length"] = "long"
    else:
        job_context["index_length"] = "short"

    if not job_context.get("sra_num_reads", None):
        # Fall back to the experiment's library_layout annotation to decide
        # single vs paired.
        try:
            sample = job_context["sample"]
            for exp in sample.experiments.all():
                for annotation in exp.experimentannotation_set.all():
                    if annotation.data.get("library_layout", "").upper() == "PAIRED":
                        job_context["sra_num_reads"] = 2
                        return job_context
                    if annotation.data.get("library_layout", "").upper() == "SINGLE":
                        job_context["sra_num_reads"] = 1
                        return job_context
        except Exception as e:
            logger.exception(
                "Problem trying to determine library strategy (single/paired)!",
                file=job_context["sra_input_file_path"],
            )
            job_context[
                "job"
            ].failure_reason = "Unable to determine library strategy (single/paired): " + str(e)
            job_context["success"] = False
            return job_context

    if not job_context.get("sra_num_reads", None):
        logger.error(
            "Completely unable to determine library strategy (single/paired)!",
            file=job_context["sra_input_file_path"],
        )
        job_context["job"].failure_reason = "Unable to determine library strategy (single/paired)"
        job_context["success"] = False

    return job_context
def _determine_index_length(job_context: Dict) -> Dict:
    """Determines whether to use the long or short salmon index.

    Adds the key 'index_length' to the job_context with a value of
    'short' if the short index is appropriate or 'long' if the long
    index is appropriate. For more information on index length see the
    _create_index function of the transcriptome_index processor.
    """
    # SRA archives are measured with sra-stat instead of streaming FASTQ.
    if job_context.get("sra_input_file_path", None):
        return _determine_index_length_sra(job_context)

    logger.debug("Determining index length..")
    total_base_pairs = 0
    number_of_reads = 0
    counter = 1

    if ".gz" == job_context["input_file_path"][-3:]:
        cat = "zcat"
    else:
        cat = "cat"

    # zcat unzips the file provided and dumps the output to STDOUT.
    # It is installed by default in Debian so it should be included
    # in every docker image already.
    with subprocess.Popen(
        [cat, job_context["input_file_path"]], stdout=subprocess.PIPE, universal_newlines=True
    ) as process:
        for line in process.stdout:
            # In the FASTQ file format, there are 4 lines for each
            # read. Three of these contain metadata about the
            # read. The string representing the read itself is found
            # on the second line of each quartet.
            if counter % 4 == 2:
                total_base_pairs += len(line.replace("\n", ""))
                number_of_reads += 1
            counter += 1

    if "input_file_path_2" in job_context:
        if ".gz" == job_context["input_file_path_2"][-3:]:
            cat = "zcat"
        else:
            cat = "cat"

        # NOTE(review): `counter` carries over from the first file, so the
        # every-4th-line pattern only stays aligned if each FASTQ has a
        # multiple of four lines — confirm this is intentional.
        with subprocess.Popen(
            [cat, job_context["input_file_path_2"]], stdout=subprocess.PIPE, universal_newlines=True
        ) as process:
            for line in process.stdout:
                if counter % 4 == 2:
                    total_base_pairs += len(line.replace("\n", ""))
                    number_of_reads += 1
                counter += 1

    if number_of_reads == 0:
        logger.error(
            "Unable to determine number_of_reads for job.",
            input_file_1=job_context.get("input_file_path"),
            input_file_2=job_context.get("input_file_path_2"),
            job_id=job_context["job"].id,
        )
        job_context["job"].failure_reason = "Unable to determine number_of_reads."
        job_context["job"].no_retry = True
        job_context["success"] = False
        return job_context

    index_length_raw = total_base_pairs / number_of_reads

    # Put the raw index length into the job context in a new field for regression testing purposes
    job_context["index_length_raw"] = index_length_raw

    if index_length_raw > 75:
        job_context["index_length"] = "long"
    else:
        job_context["index_length"] = "short"

    return job_context
def _find_or_download_index(job_context: Dict) -> Dict:
    """Finds the appropriate Salmon Index for this experiment.

    Salmon documentation states:

    "If you want to use Salmon in quasi-mapping-based mode, then you
    first have to build an Salmon index for your transcriptome."

    We have used the Data Refinery to build these indices already,
    this function retrieves the location of the correct index for the
    organism and read length and adds it to the job context.

    Adds "index_directory", "genes_to_transcripts_path", and
    "organism_index" to job_context on success; on failure sets
    job_context["success"] = False and fills in the job's failure_reason.
    """
    logger.debug("Fetching and installing index..")

    index_type = "TRANSCRIPTOME_" + job_context["index_length"].upper()
    index_object = (
        OrganismIndex.objects.filter(organism=job_context["organism"], index_type=index_type)
        .order_by("-created_at")
        .first()
    )

    if not index_object:
        logger.error(
            "Could not run Salmon processor without index for organism",
            organism=job_context["organism"],
            processor_job=job_context["job_id"],
            index_type=index_type,
        )
        job_context["job"].no_retry = True
        job_context["job"].failure_reason = "Missing transcriptome index. (" + index_type + ")"
        job_context["success"] = False
        return job_context

    job_context["index_directory"] = index_object.absolute_directory_path

    # BUGFIX: initialize outside the try block so the except handler can
    # always reference it. Previously a failure during the download step
    # (before the first assignment inside the try) made the handler's
    # cleanup call raise NameError instead of recording the real error.
    index_hard_dir = None

    try:
        # The organism index only needs to be downloaded from S3 once per
        # organism per index length per EBS volume. We don't know if
        # another job has started downloading it yet, started extracting
        # it yet, or already finished and been symlinked to a common
        # location. Therefore check to see if it's happened before we
        # complete each step.
        version_info_path = job_context["index_directory"] + "/versionInfo.json"

        # Something very bad happened and now there are corrupt indexes installed. Nuke 'em.
        if os.path.exists(version_info_path) and (os.path.getsize(version_info_path) == 0):
            logger.error("We have to nuke a zero-valued index directory: " + version_info_path)
            shutil.rmtree(job_context["index_directory"], ignore_errors=True)
            os.makedirs(job_context["index_directory"], exist_ok=True)

        index_tarball = None
        if not os.path.exists(version_info_path):
            # Index is not installed yet, so download it.
            index_file = ComputedFile.objects.filter(result=index_object.result)[0]
            index_tarball = index_file.sync_from_s3(
                path=job_context["work_dir"] + index_file.filename
            )

        if not os.path.exists(version_info_path):
            # Index is still not installed yet, so extract it.
            # Create a temporary location to download the index to, which
            # can be symlinked to once extraction is complete.
            index_hard_dir = os.path.join(LOCAL_ROOT_DIR, job_context["job_dir_prefix"]) + "_index/"
            os.makedirs(index_hard_dir)
            with tarfile.open(index_tarball, "r:gz") as index_archive:
                index_archive.extractall(index_hard_dir)

        if not os.path.exists(version_info_path):
            # Index is still not installed yet, so symlink the files we
            # have to where they are expected to reside.
            os.makedirs(job_context["index_directory"], exist_ok=True)
            index_files = [
                "versionInfo.json",
                "duplicate_clusters.tsv",
                "hash.bin",
                "indexing.log",
                "refInfo.json",
                "sa.bin",
                "genes_to_transcripts.txt",
                "header.json",
                "quasi_index.log",
                "rsd.bin",
                "txpInfo.bin",
            ]
            for subfile in index_files:
                os.symlink(index_hard_dir + subfile, job_context["index_directory"] + "/" + subfile)
        elif index_hard_dir:
            # We have failed the race.
            logger.error("We have failed the index extraction race! Removing dead trees.")
            shutil.rmtree(index_hard_dir, ignore_errors=True)
    except Exception as e:
        error_template = "Failed to download or extract transcriptome index for organism {0}: {1}"
        error_message = error_template.format(str(job_context["organism"]), str(e))
        logger.exception(error_message, processor_job=job_context["job_id"])
        job_context["job"].failure_reason = error_message
        job_context["success"] = False
        # Make sure we don't leave an empty index directory lying around.
        # Only attempt the cleanup when we actually created the directory.
        if index_hard_dir:
            shutil.rmtree(index_hard_dir, ignore_errors=True)
        return job_context

    # The index tarball contains a directory named index, so add that
    # to the path where we should put it.
    job_context["genes_to_transcripts_path"] = os.path.join(
        job_context["index_directory"], "genes_to_transcripts.txt"
    )

    job_context["organism_index"] = index_object
    return job_context
def _run_tximport_for_experiment(
    job_context: Dict, experiment: Experiment, quant_files: List[ComputedFile]
) -> Dict:
    """Runs tximport.R over the quant.sf files of one experiment.

    Syncs each quant.sf locally, writes their paths to a list file that is
    handed to tximport.R, records the run as a ComputationalResult, and
    splits the resulting gene-level TPM matrix into one smashable
    ComputedFile per sample.

    Raises utils.ProcessorJobError when the R script cannot be started,
    exits non-zero, or the TXIMPORT processor record cannot be found.
    """
    # Download all the quant.sf files for this experiment. Write all
    # their paths to a file so we can pass a path to that to
    # tximport.R rather than having to pass in one argument per
    # sample.
    tximport_path_list_file = job_context["work_dir"] + "tximport_inputs.txt"
    quant_file_paths = {}
    with open(tximport_path_list_file, "w") as input_list:
        for quant_file in quant_files:
            # We create a directory in the work directory for each (quant.sf) file, as
            # tximport assigns column names based on the parent directory name,
            # and we need those names so that we can reassociate with the samples later.
            # ex., a file with absolute_file_path: /processor_job_1/SRR123_output/quant.sf
            # downloads to: /processor_job_2/SRR123_output/quant.sf
            # So the result file has frame "SRR123_output", which we can associate with sample SRR123
            sample_output = (
                job_context["work_dir"] + str(quant_file.absolute_file_path.split("/")[-2]) + "/"
            )
            os.makedirs(sample_output, exist_ok=True)
            quant_work_path = sample_output + quant_file.filename
            quant_file_path = quant_file.get_synced_file_path(path=quant_work_path)
            input_list.write(quant_file_path + "\n")
            # Record sizes so a later failure can report what was fed in.
            quant_file_paths[quant_file_path] = os.stat(quant_file_path).st_size

    rds_filename = "txi_out.RDS"
    rds_file_path = job_context["work_dir"] + rds_filename
    tpm_filename = "gene_lengthScaledTPM.tsv"
    tpm_file_path = job_context["work_dir"] + tpm_filename

    result = ComputationalResult()

    cmd_tokens = [
        "/usr/bin/Rscript",
        "--vanilla",
        "/home/user/data_refinery_workers/processors/tximport.R",
        "--file_list",
        tximport_path_list_file,
        "--gene2txmap",
        job_context["genes_to_transcripts_path"],
        "--rds_file",
        rds_file_path,
        "--tpm_file",
        tpm_file_path,
    ]
    result.time_start = timezone.now()

    logger.debug(
        "Running tximport with: %s",
        str(cmd_tokens),
        processor_job=job_context["job_id"],
        experiment=experiment.id,
    )

    try:
        tximport_result = subprocess.run(cmd_tokens, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        raise utils.ProcessorJobError(
            "Encountered error in R code while running tximport.R: {}".format(str(e)),
            success=False,
            experiment=experiment.id,
        )

    if tximport_result.returncode != 0:
        raise utils.ProcessorJobError(
            "Found non-zero exit code from R code while running tximport.R: {}".format(
                tximport_result.stderr.decode().strip()
            ),
            success=False,
            experiment=experiment.id,
            quant_files=quant_files,
            cmd_tokens=cmd_tokens,
            quant_file_paths=quant_file_paths,
        )

    result.time_end = timezone.now()
    result.commands.append(" ".join(cmd_tokens))
    result.is_ccdl = True
    try:
        processor_key = "TXIMPORT"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        raise utils.ProcessorJobError(
            "Failed to set processor: {}".format(e), success=False, processor_key=processor_key
        )

    result.save()
    job_context["pipeline"].steps.append(result.id)

    # Associate this result with all samples in this experiment.
    # TODO: This may not be completely sensible, because `tximport` is
    # done at experiment level, not at sample level.
    # Could be very problematic if SRA's data model allows many
    # Experiments to one Run.
    # https://github.com/AlexsLemonade/refinebio/issues/297
    for sample in experiment.samples.all():
        s_r = SampleResultAssociation(sample=sample, result=result)
        s_r.save()

    # Register the RDS output of tximport as a non-smashable computed file.
    rds_file = ComputedFile()
    rds_file.absolute_file_path = rds_file_path
    rds_file.filename = rds_filename
    rds_file.result = result
    rds_file.is_smashable = False
    rds_file.is_qc = False
    rds_file.is_public = True
    rds_file.calculate_sha1()
    rds_file.calculate_size()
    rds_file.save()
    job_context["computed_files"].append(rds_file)

    # Split the tximport result into smashable subfiles:
    # one single-column frame per sample in the TPM matrix.
    data = pd.read_csv(tpm_file_path, sep="\t", header=0, index_col=0)
    individual_files = []
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        # Create sample-specific TPM file.
        sample_file_name = frame.columns.values[0] + "_" + tpm_filename
        frame_path = os.path.join(job_context["work_dir"], sample_file_name)
        frame.to_csv(frame_path, sep="\t", encoding="utf-8")

        # The frame column header is based off of the path, which includes _output.
        sample_accession_code = frame.columns.values[0].replace("_output", "")
        sample = Sample.objects.get(accession_code=sample_accession_code)

        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = sample_file_name
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context["computed_files"].append(computed_file)
        job_context["smashable_files"].append(computed_file)
        SampleResultAssociation.objects.get_or_create(sample=sample, result=result)

        # Create association with the RDS file.
        SampleComputedFileAssociation.objects.get_or_create(sample=sample, computed_file=rds_file)

        # Create association with TPM file.
        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample, computed_file=computed_file
        )

        individual_files.append(computed_file)
        job_context["samples"].append(sample)

    # Salmon-processed samples aren't marked as is_processed
    # until they are fully tximported, this value sets that
    # for the end_job function.
    job_context["tximported"] = True
    job_context["individual_files"] = individual_files
    return job_context
def get_tximport_inputs(job_context: Dict) -> Dict:
    """Adds to the job_context a mapping from experiments to a list of their quant files.

    Checks all the experiments which contain a sample from the current
    experiment. If any of them are fully processed (at least with
    salmon-quant) then the return dict will include the experiment
    mapping to a list of paths to the quant.sf file for each sample in
    that experiment.

    Stores the mapping in job_context["tximport_inputs"]. For
    tximport-only jobs this also recovers "index_length" from a quant
    result annotation (raising utils.ProcessorJobError if none exists).
    """
    experiments = job_context["sample"].experiments.all()

    quantified_experiments = {}
    for experiment in experiments:
        # We only want to consider samples that we actually can run salmon on.
        eligible_samples = experiment.samples.filter(source_database="SRA", technology="RNA-SEQ")

        if not eligible_samples.exists():
            continue

        salmon_quant_results = get_quant_results_for_experiment(experiment)

        is_tximport_job = "is_tximport_only" in job_context and job_context["is_tximport_only"]
        first_salmon_quant_result = salmon_quant_results.first()
        if is_tximport_job and first_salmon_quant_result:
            # If the job is only running tximport, then index_length
            # hasn't been set on the job context because we don't have
            # a raw file to run it on. Therefore pull it from one of
            # the result annotations.
            index_length = first_salmon_quant_result.get_index_length()
            if index_length:
                job_context["index_length"] = index_length
            # Idiom fix: use `not in` rather than `not ... in`.
            elif "index_length" not in job_context:
                raise utils.ProcessorJobError(
                    "Found quant result without an annotation specifying its index length. Why did this happen?!?",
                    success=False,
                    no_retry=True,
                )

        if should_run_tximport(experiment, salmon_quant_results, is_tximport_job):
            quantified_experiments[experiment] = get_quant_files_for_results(salmon_quant_results)

    job_context["tximport_inputs"] = quantified_experiments

    return job_context
def tximport(job_context: Dict) -> Dict:
    """Run tximport R script based on input quant files and the path
    of genes_to_transcripts.txt.

    Iterates over job_context["tximport_inputs"] (experiment ->
    quant files, built by get_tximport_inputs) and runs tximport for
    each experiment. A tximport-only job that quantified nothing is
    treated as an error.
    """
    tximport_inputs = job_context["tximport_inputs"]

    quantified_experiments = 0
    for experiment, quant_files in tximport_inputs.items():
        job_context = _run_tximport_for_experiment(job_context, experiment, quant_files)
        quantified_experiments += 1

    # BUGFIX: this guard previously checked the nonexistent key
    # "is_tximport_job" while reading "is_tximport_only", so it could
    # raise KeyError or silently never fire. "is_tximport_only" is the
    # key actually set on the job context (cf. get_tximport_inputs).
    if (
        quantified_experiments == 0
        and "is_tximport_only" in job_context
        and job_context["is_tximport_only"]
    ):
        raise utils.ProcessorJobError(
            "Tximport job ran on no experiments... Why?!?!?", success=False, no_retry=True
        )

    return job_context
def _run_salmon(job_context: Dict) -> Dict:
    """Runs Salmon Quant.

    Builds the appropriate `salmon quant` invocation for the input type
    (an SRA archive streamed through fastq-dump via FIFOs, or one/two
    fastq files), runs it with a 3-hour timeout, and on success records a
    ComputationalResult, the zipped output directory, the quant.sf file
    (uploaded to S3 when running in the cloud) and Salmon's JSON
    annotations. On failure sets job_context["success"] = False and
    fills in the job's failure_reason.
    """
    logger.debug("Running Salmon..")

    # Salmon needs to be run differently for different sample types.
    # SRA files also get processed differently as we don't want to use fasterq-dump to extract
    # them to disk.
    if job_context.get("sra_input_file_path", None):
        # Single reads
        if job_context["sra_num_reads"] == 1:
            # NOTE(review): fixed FIFO path — assumes only one Salmon job
            # runs per container/host at a time; confirm jobs can't share /tmp.
            fifo = "/tmp/barney"
            os.mkfifo(fifo)
            # fastq-dump streams the extracted reads into the FIFO in the
            # background while salmon consumes the other end.
            dump_str = "fastq-dump --stdout {input_sra_file} > {fifo} &"
            formatted_dump_command = dump_str.format(
                input_sra_file=job_context["sra_input_file_path"], fifo=fifo
            )
            # NOTE(review): dump_po is never waited on or checked; a
            # fastq-dump failure only surfaces indirectly through salmon.
            dump_po = subprocess.Popen(
                formatted_dump_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            )
            command_str = (
                "salmon --no-version-check quant -l A -i {index} "
                "-r {fifo} -p 16 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_sra_file=job_context["sra_input_file_path"],
                fifo=fifo,
                output_directory=job_context["output_directory"],
            )
        # Paired are trickier
        else:
            # Okay, for some reason I can't explain, this only works in the temp directory,
            # otherwise the `tee` part will only output to one or the other of the streams (non-deterministically),
            # but not both. This doesn't appear to happen if the fifos are in tmp.
            alpha = "/tmp/alpha"
            os.mkfifo(alpha)
            beta = "/tmp/beta"
            os.mkfifo(beta)
            # Split the interleaved fastq-dump output into the two mate
            # streams (read names ending .1 vs .2) via grep into each FIFO.
            dump_str = "fastq-dump --stdout --split-files -I {input_sra_file} | tee >(grep '@.*\.1\s' -A3 --no-group-separator > {fifo_alpha}) >(grep '@.*\.2\s' -A3 --no-group-separator > {fifo_beta}) > /dev/null &"
            formatted_dump_command = dump_str.format(
                input_sra_file=job_context["sra_input_file_path"], fifo_alpha=alpha, fifo_beta=beta
            )
            # bash is required for the >() process-substitution syntax above.
            dump_po = subprocess.Popen(
                formatted_dump_command,
                shell=True,
                executable="/bin/bash",
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            command_str = (
                "salmon --no-version-check quant -l A -i {index} "
                "-1 {fifo_alpha} -2 {fifo_beta} -p 16 -o {output_directory} --seqBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_sra_file=job_context["sra_input_file_path"],
                fifo_alpha=alpha,
                fifo_beta=beta,
                output_directory=job_context["output_directory"],
            )
    else:
        if "input_file_path_2" in job_context:
            second_read_str = " -2 {}".format(job_context["input_file_path_2"])

            # Rob recommends 16 threads/process, which fits snugly on an x1 at 8GB RAM per Salmon container:
            # (2 threads/core * 16 cores/socket * 64 vCPU) / (1TB/8GB) = ~17
            command_str = (
                "salmon --no-version-check quant -l A --biasSpeedSamp 5 -i {index}"
                " -1 {input_one}{second_read_str} -p 16 -o {output_directory}"
                " --seqBias --gcBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_one=job_context["input_file_path"],
                second_read_str=second_read_str,
                output_directory=job_context["output_directory"],
            )
        else:
            # Related: https://github.com/COMBINE-lab/salmon/issues/83
            command_str = (
                "salmon --no-version-check quant -l A -i {index}"
                " -r {input_one} -p 16 -o {output_directory}"
                " --seqBias --dumpEq --writeUnmappedNames"
            )
            formatted_command = command_str.format(
                index=job_context["index_directory"],
                input_one=job_context["input_file_path"],
                output_directory=job_context["output_directory"],
            )

    logger.debug(
        "Running Salmon Quant using the following shell command: %s",
        formatted_command,
        processor_job=job_context["job_id"],
    )

    # Salmon probably shouldn't take longer than three hours.
    timeout = 60 * 60 * 3
    job_context["time_start"] = timezone.now()
    try:
        completed_command = subprocess.run(
            formatted_command.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        failure_reason = "Salmon timed out because it failed to complete within 3 hours."
        logger.error(
            failure_reason,
            sample_accesion_code=job_context["sample"].accession_code,
            processor_job=job_context["job_id"],
        )
        job_context["job"].failure_reason = failure_reason
        # Timeouts are treated as permanent failures: don't retry.
        job_context["job"].no_retry = True
        job_context["success"] = False
        return job_context

    job_context["time_end"] = timezone.now()

    # NOTE(review): only exit code 1 is treated as failure; any other
    # non-zero exit code would fall through to the success branch —
    # confirm salmon only ever exits 0 or 1.
    if completed_command.returncode == 1:
        stderr = completed_command.stderr.decode().strip()
        # Trim everything before the first "ERROR:" marker (if present)
        # so failure_reason starts at the actual error text.
        error_start = stderr.upper().find("ERROR:")
        error_start = error_start if error_start != -1 else 0
        logger.error(
            "Shell call to salmon failed with error message: %s",
            stderr[error_start:],
            processor_job=job_context["job_id"],
        )

        # If salmon has an error exit code then we don't want to retry it.
        job_context["job"].no_retry = True
        job_context["job"].failure_reason = (
            "Shell call to salmon failed because: " + stderr[error_start:]
        )
        job_context["success"] = False
    else:
        result = ComputationalResult()
        result.commands.append(formatted_command)
        result.time_start = job_context["time_start"]
        result.time_end = job_context["time_end"]
        result.organism_index = job_context["organism_index"]
        result.is_ccdl = True

        try:
            processor_key = "SALMON_QUANT"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)

        # Zip up the output of Salmon Quant
        try:
            with tarfile.open(job_context["output_archive"], "w:gz") as tar:
                tar.add(job_context["output_directory"], arcname=os.sep)
        except Exception:
            logger.exception(
                "Exception caught while zipping processed directory %s",
                job_context["output_directory"],
                processor_job=job_context["job_id"],
            )
            failure_template = "Exception caught while zipping processed directory {}"
            job_context["job"].failure_reason = failure_template.format(
                job_context["output_archive"]
            )
            job_context["success"] = False
            return job_context

        # The zipped output directory, kept as a non-smashable artifact.
        salmon_quant_archive = ComputedFile()
        salmon_quant_archive.absolute_file_path = job_context["output_archive"]
        salmon_quant_archive.filename = os.path.split(job_context["output_archive"])[-1]
        salmon_quant_archive.calculate_sha1()
        salmon_quant_archive.calculate_size()
        salmon_quant_archive.is_public = True
        salmon_quant_archive.is_smashable = False
        salmon_quant_archive.is_qc = False

        quant_file = ComputedFile()
        quant_file.s3_bucket = S3_BUCKET_NAME
        # Timestamp the S3 key so re-runs of the same sample don't
        # overwrite each other's quant files.
        timestamp = str(timezone.now().timestamp()).split(".")[0]
        quant_file.s3_key = "quant_files/sample_{0}_{1}_quant.sf".format(
            job_context["sample"].id, timestamp
        )
        quant_file.filename = "quant.sf"
        quant_file.absolute_file_path = job_context["output_directory"] + "quant.sf"
        quant_file.is_public = False
        quant_file.is_smashable = False
        quant_file.is_qc = False
        quant_file.calculate_sha1()
        quant_file.calculate_size()

        # If we're running in the cloud we need to upload the quant.sf
        # file so that it can be used by a job running on any machine
        # to run tximport. We can't use sync_to_s3 though because we
        # have to sync it before we can save the file so it cannot be
        # discovered by other jobs before it is uploaded.
        if settings.RUNNING_IN_CLOUD:
            try:
                S3.upload_file(
                    quant_file.absolute_file_path,
                    quant_file.s3_bucket,
                    quant_file.s3_key,
                    ExtraArgs={"ACL": "public-read", "StorageClass": "STANDARD_IA"},
                )
            except Exception as e:
                logger.exception(
                    e, processor_job=job_context["job_id"], sample=job_context["sample"].id
                )
                failure_template = "Exception caught while uploading quantfile to S3: {}"
                job_context["job"].failure_reason = failure_template.format(
                    quant_file.absolute_file_path
                )
                job_context["success"] = False
                return job_context

        # Here select_for_update() is used as a mutex that forces multiple
        # jobs to execute this block of code in serial manner. See:
        # https://docs.djangoproject.com/en/1.11/ref/models/querysets/#select-for-update
        # Theorectically any rows in any table can be locked here, we're
        # locking all existing rows in ComputationalResult table.
        # NOTE(review): transaction scope reconstructed from context —
        # confirm which statements the atomic block is meant to cover.
        with transaction.atomic():
            ComputationalResult.objects.select_for_update()
            result.save()
            job_context["quant_result"] = result
            quant_file.result = result
            quant_file.save()

        job_context["result"] = result
        job_context["pipeline"].steps.append(result.id)

        SampleResultAssociation.objects.get_or_create(
            sample=job_context["sample"], result=result
        )

        salmon_quant_archive.result = result
        salmon_quant_archive.save()
        job_context["computed_files"].append(salmon_quant_archive)

        # Record which index length was chosen (and the raw computed
        # value) so later tximport-only jobs can recover it.
        kv = ComputationalResultAnnotation()
        kv.data = {
            "index_length": job_context["index_length"],
            "index_length_get": job_context.get("index_length_raw", None),
        }
        kv.result = result
        kv.is_public = True
        kv.save()

        try:
            with open(
                os.path.join(job_context["output_directory"], "lib_format_counts.json")
            ) as lfc_file:
                format_count_data = json.load(lfc_file)
                kv = ComputationalResultAnnotation()
                kv.data = format_count_data
                kv.result = result
                kv.is_public = True
                kv.save()
        except Exception:
            # See: https://github.com/AlexsLemonade/refinebio/issues/1167
            logger.exception(
                "Error parsing Salmon lib_format_counts JSON output!",
                processor_job=job_context["job_id"],
            )

        try:
            with open(
                os.path.join(job_context["output_directory"], "aux_info", "meta_info.json")
            ) as mi_file:
                meta_info = json.load(mi_file)
                kv = ComputationalResultAnnotation()
                kv.data = meta_info
                kv.result = result
                kv.is_public = True
                kv.save()
        except Exception:
            # See: https://github.com/AlexsLemonade/refinebio/issues/1167
            logger.exception(
                "Error parsing Salmon meta_info JSON output!", processor_job=job_context["job_id"]
            )

        job_context["success"] = True

    return job_context
def _run_salmontools(job_context: Dict) -> Dict:
    """Run SalmonTools to extract the reads that Salmon could not map.

    Builds a `salmontools extract-unmapped` command from Salmon's
    unmapped_names.txt output, zips the resulting directory, and records
    it as a QC ComputedFile attached to a new ComputationalResult.
    Success is decided by pattern-matching stderr (see the workaround
    comment below). Sets job_context["success"] accordingly.
    """
    logger.debug("Running SalmonTools ...")
    unmapped_filename = job_context["output_directory"] + "aux_info/unmapped_names.txt"

    command_str = "salmontools extract-unmapped -u {unmapped_file} -o {output} "
    output_prefix = job_context["salmontools_directory"] + "unmapped_by_salmon"
    command_str = command_str.format(unmapped_file=unmapped_filename, output=output_prefix)
    # Paired-end samples pass both read files; single-end passes just one.
    if "input_file_path_2" in job_context:
        command_str += "-1 {input_1} -2 {input_2}"
        command_str = command_str.format(
            input_1=job_context["input_file_path"], input_2=job_context["input_file_path_2"]
        )
    else:
        command_str += "-r {input_1}"
        command_str = command_str.format(input_1=job_context["input_file_path"])

    start_time = timezone.now()
    logger.debug(
        "Running the following SalmonTools command: %s",
        command_str,
        processor_job=job_context["job_id"],
    )
    completed_command = subprocess.run(
        command_str.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    end_time = timezone.now()

    # As of SalmonTools 0.1.0, completed_command.returncode is always 0,
    # (even if error happens). completed_command.stderr is not totally
    # reliable either, because it will output the following line even
    # when the execution succeeds:
    # "There were <N> unmapped reads\n"
    # in which "<N>" is the number of lines in input unmapped_names.txt.
    #
    # As a workaround, we are using a regular expression here to test
    # the status of SalmonTools execution. Any text in stderr that is
    # not in the above format is treated as error message.
    status_str = completed_command.stderr.decode().strip()
    success_pattern = r"^There were \d+ unmapped reads$"
    if re.match(success_pattern, status_str):
        # Zip up the output of salmontools
        try:
            with tarfile.open(job_context["salmontools_archive"], "w:gz") as tar:
                tar.add(job_context["salmontools_directory"], arcname=os.sep)
        except Exception:
            logger.exception(
                "Exception caught while zipping processed directory %s",
                job_context["salmontools_directory"],
                processor_job=job_context["job_id"],
            )
            failure_template = "Exception caught while zipping salmontools directory {}"
            job_context["job"].failure_reason = failure_template.format(
                job_context["salmontools_archive"]
            )
            job_context["success"] = False
            return job_context

        result = ComputationalResult()
        result.commands.append(command_str)
        result.time_start = start_time
        result.time_end = end_time
        result.is_ccdl = True

        try:
            processor_key = "SALMONTOOLS"
            result.processor = utils.find_processor(processor_key)
        except Exception as e:
            return utils.handle_processor_exception(job_context, processor_key, e)

        result.save()
        job_context["pipeline"].steps.append(result.id)

        assoc = SampleResultAssociation()
        assoc.sample = job_context["sample"]
        assoc.result = result
        assoc.save()

        # The zipped salmontools output is a QC artifact, not smashable data.
        computed_file = ComputedFile()
        computed_file.filename = job_context["salmontools_archive"].split("/")[-1]
        computed_file.absolute_file_path = job_context["salmontools_archive"]
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.is_public = True
        computed_file.is_smashable = False
        computed_file.is_qc = True
        computed_file.result = result
        computed_file.save()
        job_context["computed_files"].append(computed_file)

        assoc = SampleComputedFileAssociation()
        assoc.sample = job_context["sample"]
        assoc.computed_file = computed_file
        assoc.save()

        job_context["result"] = result
        job_context["success"] = True
    else:  # error in salmontools
        logger.error(
            "Shell call to salmontools failed with error message: %s",
            status_str,
            processor_job=job_context["job_id"],
        )
        job_context["job"].failure_reason = (
            "Shell call to salmontools failed because: " + status_str
        )
        job_context["success"] = False

    return job_context
def salmon(job_id: int) -> None:
    """Entry point for the Salmon processor.

    Assembles the ordered pipeline steps (file preparation, index
    selection, salmon quant, tximport, and salmontools) and hands them
    to utils.run_pipeline along with a fresh Pipeline record.
    """
    steps = [
        utils.start_job,
        _set_job_prefix,
        _prepare_files,
        _determine_index_length,
        _find_or_download_index,
        _run_salmon,
        get_tximport_inputs,
        tximport,
        _run_salmontools,
        utils.end_job,
    ]
    initial_context = {
        "job_id": job_id,
        "pipeline": Pipeline(name=PipelineEnum.SALMON.value),
    }
    return utils.run_pipeline(initial_context, steps)
<|code_end|>
|
Processing info: sample is shown as not available even though only tximport was run on it
Ex:
https://www.refine.bio/experiments/SRP074593/gene-expression-profiling-of-drosophila-s2r-cells-following-rnai-mediated-knockdown-of-transcription-factors

We should just show the submitter-supplied info, since these samples aren't really processed through our pipeline.
| common/data_refinery_common/migrations/0052_remove_invalid_tximport_associations.py
<|code_start|><|code_end|>
| common/data_refinery_common/migrations/0052_remove_invalid_tximport_associations.py
<|code_start|>from django.db import migrations
def remove_tximport_invalid_associations(apps, schema_editor):
    """Delete tximport result associations that point at unprocessed samples.

    tximport results used to be associated with every sample in an
    experiment, even samples that were never processed.
    Ref https://github.com/AlexsLemonade/refinebio/issues/2054
    """
    association_model = apps.get_model("data_refinery_common", "SampleResultAssociation")
    invalid_associations = association_model.objects.filter(
        sample__is_processed=False, result__processor__name__iexact="tximport"
    )
    invalid_associations.delete()
class Migration(migrations.Migration):
    """Data-only migration removing invalid tximport/sample associations."""

    dependencies = [
        ("data_refinery_common", "0051_remove_dataset_email_sent"),
    ]

    operations = [
        # No reverse function is supplied, so this migration is irreversible.
        migrations.RunPython(remove_tximport_invalid_associations),
    ]
<|code_end|>
|
Engagement bot summary doesn't seem to be working
### Context
https://github.com/AlexsLemonade/refinebio/issues/2140
### Problem or idea
Tested today and reported no downloads in the last 7 days.
> **EngagementBot**
> There were no downloads in the last 7 days.
However there were a few that should have been counted:
```
data_refinery=> select id, created_at from data_refinery_common_dataset where is_processed='t' and email_address is not NULL order by created_at desc limit 10;
id | created_at
--------------------------------------+-------------------------------
381e86b9-aea2-4d9a-8a61-338b111fe54d | 2020-03-29 13:06:28.954778+00
eb726da7-8006-46c4-95ba-8a3bcecbd6f1 | 2020-03-26 02:26:35.196333+00
099e7b58-dce6-4d5c-a4b6-f4d049dad866 | 2020-03-25 16:19:14.656207+00
c4c086ef-6963-4fd8-b4e2-a7cf09c9eb68 | 2020-03-25 16:18:03.158765+00
0716857b-211f-484c-9e2b-3e447dd5cdaf | 2020-03-25 15:34:20.155269+00
2e7c1e1c-138b-43b1-9463-0113c64e7edb | 2020-03-25 15:19:23.577922+00
4a2ac1ef-4e9e-4945-bdf8-00d595f99c4c | 2020-03-25 15:18:21.49652+00
bbb30f4e-7b5f-4595-a47d-25f8d72e4a7c | 2020-03-25 15:03:55.391917+00
eff9cbe4-c4f5-43a6-b004-6cd36f10ea15 | 2020-03-24 22:41:51.185922+00
```
### Solution or next step
Investigate why these downloads are not being reported.
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import requests
from data_refinery_common.models import DatasetAnnotation
class Command(BaseCommand):
    """Posts a summary of recent dataset downloads to a Slack channel."""

    help = "Post downloads summary to slack"

    def add_arguments(self, parser):
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # default to a week
            help=("Number of days in the past for which to build the stats"),
        )
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )

    def handle(self, *args, **options):
        days = options["days"]
        # BUGFIX: this was `timedelta(days=-days)`, which put start_time
        # in the future so `created_at__gt=start_time` never matched
        # anything and the bot always reported zero downloads.
        start_time = timezone.now() - datetime.timedelta(days=days)

        annotation_queryset = DatasetAnnotation.objects.filter(
            created_at__gt=start_time
        ).prefetch_related("dataset")
        # Keep only download-start events initiated by non-CCDL users.
        annotations = [
            annotation
            for annotation in annotation_queryset
            if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
        ]
        unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
        unique_ips = list(set(annotation.data["ip"] for annotation in annotations))

        if unique_users:
            fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
                days, len(unique_users), len(unique_ips)
            )
        else:
            fallback_text = "There were no downloads in the last {0} days.".format(days)

        new_users = ""
        returning_users = ""
        for email in unique_users:
            user_annotations = annotation_queryset.filter(dataset__email_address=email)
            total_downloads = user_annotations.count()
            unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
            locations = ", ".join(get_ip_location(ip) for ip in unique_locations)
            # BUGFIX: a user with download annotations from *before* the
            # reporting window is a returning user. The truthy queryset
            # was previously named `is_new_user` and the branches below
            # were swapped, so new and returning users were misclassified.
            has_prior_downloads = DatasetAnnotation.objects.filter(
                created_at__lt=start_time, dataset__email_address=email
            ).exists()
            text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
            if has_prior_downloads:
                returning_users += text
            else:
                new_users += text

        blocks = [
            {
                "type": "section",
                "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
            }
        ]
        if new_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
                }
            )
        if returning_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
                }
            )

        # Post to slack
        requests.post(
            settings.ENGAGEMENTBOT_WEBHOOK,
            json={
                "username": "EngagementBot",
                "icon_emoji": ":halal:",
                "channel": "#" + options["channel"],
                "text": fallback_text,
                "blocks": blocks,
            },
            headers={"Content-Type": "application/json"},
            timeout=10,
        )
def should_display_email(email: str) -> bool:
    """Returns true if the given email is not associated with the CCDL users."""
    if not email:
        return False
    return not (
        email.startswith("cansav09")
        or email.startswith("arielsvn")
        or email.startswith("jaclyn.n.taroni")
        or email.startswith("kurt.wheeler")
        or email.startswith("greenescientist")
        # BUGFIX: this was `"@alexslemonade.org" not in email`, which
        # treated every *external* address as internal and therefore hid
        # all real users from the report.
        or "@alexslemonade.org" in email
        or email.startswith("miserlou")
        or email.startswith("d.prasad")
        # BUGFIX: these were `email is ("...")` identity checks, which
        # only ever matched by CPython string-interning accident.
        or email == "daniel.himmelstein@gmail.com"
        or email == "dv.prasad991@gmail.com"
    )
def get_ip_location(remote_ip):
    """Best-effort lookup of the city for an IP address.

    Falls back to returning the raw IP string when the geolocation
    service is unreachable or returns an unexpected payload.
    """
    lookup_url = "https://ipapi.co/" + remote_ip + "/json/"
    try:
        response = requests.get(lookup_url, timeout=10)
        return response.json()["city"]
    except Exception:
        return remote_ip
<|code_end|>
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import requests
from data_refinery_common.models import DatasetAnnotation
class Command(BaseCommand):
    """Posts a summary of recent dataset downloads to a Slack channel."""

    help = "Post downloads summary to slack"

    def add_arguments(self, parser):
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # default to a week
            help=("Number of days in the past for which to build the stats"),
        )
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )

    def handle(self, *args, **options):
        days = options["days"]
        start_time = timezone.now() - datetime.timedelta(days=days)

        annotation_queryset = DatasetAnnotation.objects.filter(
            created_at__gt=start_time
        ).prefetch_related("dataset")
        # Only count downloads that actually started and that belong to
        # non-CCDL users.
        annotations = [
            annotation
            for annotation in annotation_queryset
            if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
        ]
        unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
        unique_ips = list(set(annotation.data["ip"] for annotation in annotations))

        if unique_users:
            fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
                days, len(unique_users), len(unique_ips)
            )
        else:
            fallback_text = "There were no downloads in the last {0} days.".format(days)

        new_users = ""
        returning_users = ""
        for email in unique_users:
            user_annotations = annotation_queryset.filter(dataset__email_address=email)
            total_downloads = user_annotations.count()
            unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
            locations = ", ".join(get_ip_location(ip) for ip in unique_locations)

            # A user is "returning" when they have download annotations from
            # BEFORE the reporting window. The previous code assigned this
            # queryset to `is_new_user`, inverting the classification so every
            # user was reported as returning. `.exists()` also avoids fetching
            # rows just for their truthiness.
            is_returning_user = DatasetAnnotation.objects.filter(
                created_at__lt=start_time, dataset__email_address=email
            ).exists()

            text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
            if is_returning_user:
                returning_users += text
            else:
                new_users += text

        blocks = [
            {
                "type": "section",
                "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
            }
        ]
        if new_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
                }
            )
        if returning_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
                }
            )

        # Post to slack
        requests.post(
            settings.ENGAGEMENTBOT_WEBHOOK,
            json={
                "username": "EngagementBot",
                "icon_emoji": ":halal:",
                "channel": "#" + options["channel"],
                "text": fallback_text,
                "blocks": blocks,
            },
            headers={"Content-Type": "application/json"},
            timeout=10,
        )
def should_display_email(email: str) -> bool:
    """Return True if the given email is NOT associated with CCDL users.

    CCDL staff downloads are excluded from the engagement summary.
    NOTE: the previous version used `is` for the two explicit addresses,
    which compares string identity rather than equality and therefore
    effectively never matched; `==` is required here.
    """
    if not email:
        return False
    return not (
        email.startswith("cansav09")
        or email.startswith("arielsvn")
        or email.startswith("jaclyn.n.taroni")
        or email.startswith("kurt.wheeler")
        or email.startswith("greenescientist")
        or email.startswith("miserlou")
        or email.startswith("d.prasad")
        or email.endswith("@alexslemonade.org")
        or email == "daniel.himmelstein@gmail.com"
        or email == "dv.prasad991@gmail.com"
    )
def get_ip_location(remote_ip):
    """Best-effort "city, country" lookup for an IP address via ipapi.co.

    Falls back to the raw IP string when the lookup fails for any reason
    (network error, missing keys in the payload, etc.).
    """
    lookup_url = "https://ipapi.co/" + remote_ip + "/json/"
    try:
        payload = requests.get(lookup_url, timeout=10).json()
        return "{0}, {1}".format(payload["city"], payload["country_name"])
    except Exception:
        return remote_ip
<|code_end|>
|
Engagement bot thinks every user is a returning user
### Context
https://alexslemonade.slack.com/archives/CRK42AL1Y/p1587988808265500
### Problem or idea
@dvenprasad says 6 of those are new users. There must be a bug in the queries it uses or something.
### Solution or next step
Fix the engagement bot so it reports new users as new users.
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import requests
from data_refinery_common.models import DatasetAnnotation
class Command(BaseCommand):
    """Posts a summary of recent dataset downloads to a Slack channel."""

    help = "Post downloads summary to slack"

    def add_arguments(self, parser):
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # default to a week
            help=("Number of days in the past for which to build the stats"),
        )
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )

    def handle(self, *args, **options):
        days = options["days"]
        start_time = timezone.now() - datetime.timedelta(days=days)

        annotation_queryset = DatasetAnnotation.objects.filter(
            created_at__gt=start_time
        ).prefetch_related("dataset")
        # Only count downloads that actually started and that belong to
        # non-CCDL users.
        annotations = [
            annotation
            for annotation in annotation_queryset
            if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
        ]
        unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
        unique_ips = list(set(annotation.data["ip"] for annotation in annotations))

        if unique_users:
            fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
                days, len(unique_users), len(unique_ips)
            )
        else:
            fallback_text = "There were no downloads in the last {0} days.".format(days)

        new_users = ""
        returning_users = ""
        for email in unique_users:
            user_annotations = annotation_queryset.filter(dataset__email_address=email)
            total_downloads = user_annotations.count()
            unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
            locations = ", ".join(get_ip_location(ip) for ip in unique_locations)

            # A user is "returning" when they have download annotations from
            # BEFORE the reporting window. The previous code assigned this
            # queryset to `is_new_user`, inverting the classification so every
            # user was reported as returning. `.exists()` also avoids fetching
            # rows just for their truthiness.
            is_returning_user = DatasetAnnotation.objects.filter(
                created_at__lt=start_time, dataset__email_address=email
            ).exists()

            text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
            if is_returning_user:
                returning_users += text
            else:
                new_users += text

        blocks = [
            {
                "type": "section",
                "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
            }
        ]
        if new_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
                }
            )
        if returning_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
                }
            )

        # Post to slack
        requests.post(
            settings.ENGAGEMENTBOT_WEBHOOK,
            json={
                "username": "EngagementBot",
                "icon_emoji": ":halal:",
                "channel": "#" + options["channel"],
                "text": fallback_text,
                "blocks": blocks,
            },
            headers={"Content-Type": "application/json"},
            timeout=10,
        )
def should_display_email(email: str) -> bool:
    """Return True if the given email is NOT associated with CCDL users.

    CCDL staff downloads are excluded from the engagement summary.
    NOTE: the previous version used `is` for the two explicit addresses,
    which compares string identity rather than equality and therefore
    effectively never matched; `==` is required here.
    """
    if not email:
        return False
    return not (
        email.startswith("cansav09")
        or email.startswith("arielsvn")
        or email.startswith("jaclyn.n.taroni")
        or email.startswith("kurt.wheeler")
        or email.startswith("greenescientist")
        or email.startswith("miserlou")
        or email.startswith("d.prasad")
        or email.endswith("@alexslemonade.org")
        or email == "daniel.himmelstein@gmail.com"
        or email == "dv.prasad991@gmail.com"
    )
def get_ip_location(remote_ip):
    """Best-effort "city, country" lookup for an IP address via ipapi.co.

    Falls back to the raw IP string when the lookup fails for any reason
    (network error, missing keys in the payload, etc.).
    """
    lookup_url = "https://ipapi.co/" + remote_ip + "/json/"
    try:
        payload = requests.get(lookup_url, timeout=10).json()
        return "{0}, {1}".format(payload["city"], payload["country_name"])
    except Exception:
        return remote_ip
<|code_end|>
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import requests
from data_refinery_common.models import DatasetAnnotation
class Command(BaseCommand):
    """Posts a summary of recent dataset downloads to a Slack channel."""

    help = "Post downloads summary to slack"

    def add_arguments(self, parser):
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # default to a week
            help=("Number of days in the past for which to build the stats"),
        )
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )

    def handle(self, *args, **options):
        days = options["days"]
        start_time = timezone.now() - datetime.timedelta(days=days)

        annotation_queryset = DatasetAnnotation.objects.filter(
            created_at__gt=start_time
        ).prefetch_related("dataset")
        # Only count downloads that actually started and that belong to
        # non-CCDL users.
        annotations = [
            annotation
            for annotation in annotation_queryset
            if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
        ]
        unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
        unique_ips = list(set(annotation.data["ip"] for annotation in annotations))

        new_users = ""
        returning_users = ""
        total_downloads = 0
        for email in unique_users:
            user_annotations = annotation_queryset.filter(dataset__email_address=email)
            downloads = user_annotations.count()
            total_downloads += downloads
            unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
            locations = ", ".join(get_ip_location(ip) for ip in unique_locations)

            # Returning users have download annotations from before the
            # reporting window. `.exists()` avoids fetching all rows just to
            # evaluate the queryset's truthiness.
            is_returning_user = DatasetAnnotation.objects.filter(
                created_at__lt=start_time, dataset__email_address=email
            ).exists()

            text = "{0} | {1} downloads from {2}\n".format(email, downloads, locations)
            if is_returning_user:
                returning_users += text
            else:
                new_users += text

        if total_downloads > 0:
            fallback_text = "In the last {0} days, {1} users downloaded {2} datasets from {3} locations.".format(
                days, len(unique_users), total_downloads, len(unique_ips)
            )
        else:
            fallback_text = "There were no downloads in the last {0} days.".format(days)

        blocks = [
            {
                "type": "section",
                "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
            }
        ]
        if new_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
                }
            )
        if returning_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
                }
            )

        # Post to slack
        requests.post(
            settings.ENGAGEMENTBOT_WEBHOOK,
            json={
                "username": "EngagementBot",
                "icon_emoji": ":halal:",
                "channel": "#" + options["channel"],
                "text": fallback_text,
                "blocks": blocks,
            },
            headers={"Content-Type": "application/json"},
            timeout=10,
        )
def should_display_email(email: str) -> bool:
    """Return True if the given email is NOT associated with CCDL users.

    CCDL staff downloads are excluded from the engagement summary.
    NOTE: the previous version used `is` for the two explicit addresses,
    which compares string identity rather than equality and therefore
    effectively never matched; `==` is required here.
    """
    if not email:
        return False
    return not (
        email.startswith("cansav09")
        or email.startswith("arielsvn")
        or email.startswith("jaclyn.n.taroni")
        or email.startswith("kurt.wheeler")
        or email.startswith("greenescientist")
        or email.startswith("miserlou")
        or email.startswith("d.prasad")
        or email.endswith("@alexslemonade.org")
        or email == "daniel.himmelstein@gmail.com"
        or email == "dv.prasad991@gmail.com"
    )
def get_ip_location(remote_ip):
    """Best-effort "city, country" lookup for an IP address via ipapi.co.

    Falls back to the raw IP string when the lookup fails for any reason
    (network error, missing keys in the payload, etc.).
    """
    lookup_url = "https://ipapi.co/" + remote_ip + "/json/"
    try:
        payload = requests.get(lookup_url, timeout=10).json()
        return "{0}, {1}".format(payload["city"], payload["country_name"])
    except Exception:
        return remote_ip
<|code_end|>
|
Store locations from where our users download data
### Context
https://github.com/AlexsLemonade/refinebio/pull/2216#discussion_r401233850
### Problem or idea
Our engagement bot only stores IPs from the users, but we are also interested in their locations which can change over the years.
### Solution or next step
We should modify the `post_engagement_bot_summary` command to also store the location from where people are downloading datasets.
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import requests
from data_refinery_common.models import DatasetAnnotation
class Command(BaseCommand):
    """Posts a summary of recent dataset downloads to a Slack channel."""

    help = "Post downloads summary to slack"

    def add_arguments(self, parser):
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # default to a week
            help=("Number of days in the past for which to build the stats"),
        )
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )

    def handle(self, *args, **options):
        days = options["days"]
        start_time = timezone.now() - datetime.timedelta(days=days)

        annotation_queryset = DatasetAnnotation.objects.filter(
            created_at__gt=start_time
        ).prefetch_related("dataset")
        # Only count downloads that actually started and that belong to
        # non-CCDL users.
        annotations = [
            annotation
            for annotation in annotation_queryset
            if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
        ]
        unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
        unique_ips = list(set(annotation.data["ip"] for annotation in annotations))

        new_users = ""
        returning_users = ""
        total_downloads = 0
        for email in unique_users:
            user_annotations = annotation_queryset.filter(dataset__email_address=email)
            downloads = user_annotations.count()
            total_downloads += downloads
            unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
            locations = ", ".join(get_ip_location(ip) for ip in unique_locations)

            # Returning users have download annotations from before the
            # reporting window. `.exists()` avoids fetching all rows just to
            # evaluate the queryset's truthiness.
            is_returning_user = DatasetAnnotation.objects.filter(
                created_at__lt=start_time, dataset__email_address=email
            ).exists()

            text = "{0} | {1} downloads from {2}\n".format(email, downloads, locations)
            if is_returning_user:
                returning_users += text
            else:
                new_users += text

        if total_downloads > 0:
            fallback_text = "In the last {0} days, {1} users downloaded {2} datasets from {3} locations.".format(
                days, len(unique_users), total_downloads, len(unique_ips)
            )
        else:
            fallback_text = "There were no downloads in the last {0} days.".format(days)

        blocks = [
            {
                "type": "section",
                "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
            }
        ]
        if new_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
                }
            )
        if returning_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
                }
            )

        # Post to slack
        requests.post(
            settings.ENGAGEMENTBOT_WEBHOOK,
            json={
                "username": "EngagementBot",
                "icon_emoji": ":halal:",
                "channel": "#" + options["channel"],
                "text": fallback_text,
                "blocks": blocks,
            },
            headers={"Content-Type": "application/json"},
            timeout=10,
        )
def should_display_email(email: str) -> bool:
    """Return True if the given email is NOT associated with CCDL users.

    CCDL staff downloads are excluded from the engagement summary.
    NOTE: the previous version used `is` for the two explicit addresses,
    which compares string identity rather than equality and therefore
    effectively never matched; `==` is required here.
    """
    if not email:
        return False
    return not (
        email.startswith("cansav09")
        or email.startswith("arielsvn")
        or email.startswith("jaclyn.n.taroni")
        or email.startswith("kurt.wheeler")
        or email.startswith("greenescientist")
        or email.startswith("miserlou")
        or email.startswith("d.prasad")
        or email.endswith("@alexslemonade.org")
        or email == "daniel.himmelstein@gmail.com"
        or email == "dv.prasad991@gmail.com"
    )
def get_ip_location(remote_ip):
    """Best-effort "city, country" lookup for an IP address via ipapi.co.

    Falls back to the raw IP string when the lookup fails for any reason
    (network error, missing keys in the payload, etc.).
    """
    lookup_url = "https://ipapi.co/" + remote_ip + "/json/"
    try:
        payload = requests.get(lookup_url, timeout=10).json()
        return "{0}, {1}".format(payload["city"], payload["country_name"])
    except Exception:
        return remote_ip
<|code_end|>
common/data_refinery_common/migrations/0055_auto_20200529_1230.py
<|code_start|><|code_end|>
common/data_refinery_common/models/dataset_annotation.py
<|code_start|>from django.contrib.postgres.fields import JSONField
from django.db import models
from django.utils import timezone
from data_refinery_common.models.managers import PublicObjectsManager
class DatasetAnnotation(models.Model):
    """Semi-standard information associated with a Dataset.

    IMPORTANT: This data should not be exposed through an API."""

    class Meta:
        db_table = "dataset_annotations"
        base_manager_name = "public_objects"

    # Managers
    objects = models.Manager()
    # NOTE(review): PublicObjectsManager presumably restricts to is_public rows
    # — confirm in data_refinery_common.models.managers.
    public_objects = PublicObjectsManager()

    # Relations
    dataset = models.ForeignKey("Dataset", blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    # Free-form JSON payload; callers elsewhere in this codebase read/write
    # keys such as "start" and "ip".
    data = JSONField(default=dict)

    # Common Properties
    is_public = models.BooleanField(default=False)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps."""
        current_time = timezone.now()
        if not self.pk:
            # Only stamp created_at on first insert (no primary key yet).
            self.created_at = current_time
        self.last_modified = current_time
        return super(DatasetAnnotation, self).save(*args, **kwargs)
<|code_end|>
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import requests
from data_refinery_common.models import DatasetAnnotation
class Command(BaseCommand):
    """Posts a summary of recent dataset downloads to a Slack channel.

    Also backfills a "location" key on each annotation so locations survive
    IP-address churn.
    """

    help = "Post downloads summary to slack"

    def add_arguments(self, parser):
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # default to a week
            help=("Number of days in the past for which to build the stats"),
        )
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )

    def handle(self, *args, **options):
        days = options["days"]
        start_time = timezone.now() - datetime.timedelta(days=days)

        annotation_queryset = DatasetAnnotation.objects.filter(
            created_at__gt=start_time
        ).prefetch_related("dataset")
        # Only count downloads that actually started and that belong to
        # non-CCDL users.
        annotations = [
            annotation
            for annotation in annotation_queryset
            if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
        ]

        # Save the locations permanently, since IP addresses can cycle over time.
        # (Idiom fix: `x not in y` instead of `not x in y`.)
        location_cache = dict()
        for annotation in annotation_queryset:
            if "location" not in annotation.data:
                ip = annotation.data["ip"]
                if ip not in location_cache:
                    location_cache[ip] = get_ip_location(ip)
                annotation.data["location"] = location_cache[ip]
                annotation.save()

        unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
        unique_locations = list(set(annotation.data["location"] for annotation in annotations))

        new_users = ""
        returning_users = ""
        total_downloads = 0
        for email in unique_users:
            user_annotations = annotation_queryset.filter(dataset__email_address=email)
            downloads = user_annotations.count()
            total_downloads += downloads
            locations = ", ".join(
                list(set(annotation.data["location"] for annotation in user_annotations))
            )

            # Returning users have download annotations from before the
            # reporting window. `.exists()` avoids fetching all rows just to
            # evaluate the queryset's truthiness.
            is_returning_user = DatasetAnnotation.objects.filter(
                created_at__lt=start_time, dataset__email_address=email
            ).exists()

            text = "{0} | {1} downloads from {2}\n".format(email, downloads, locations)
            if is_returning_user:
                returning_users += text
            else:
                new_users += text

        if total_downloads > 0:
            fallback_text = "In the last {0} days, {1} users downloaded {2} datasets from {3} locations.".format(
                days, len(unique_users), total_downloads, len(unique_locations)
            )
        else:
            fallback_text = "There were no downloads in the last {0} days.".format(days)

        blocks = [
            {
                "type": "section",
                "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
            }
        ]
        if new_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
                }
            )
        if returning_users:
            blocks.append(
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
                }
            )

        # Post to slack
        requests.post(
            settings.ENGAGEMENTBOT_WEBHOOK,
            json={
                "username": "EngagementBot",
                "icon_emoji": ":halal:",
                "channel": "#" + options["channel"],
                "text": fallback_text,
                "blocks": blocks,
            },
            headers={"Content-Type": "application/json"},
            timeout=10,
        )
def should_display_email(email: str) -> bool:
    """Return True if the given email is NOT associated with CCDL users.

    CCDL staff downloads are excluded from the engagement summary.
    NOTE: the previous version used `is` for the two explicit addresses,
    which compares string identity rather than equality and therefore
    effectively never matched; `==` is required here.
    """
    if not email:
        return False
    return not (
        email.startswith("cansav09")
        or email.startswith("arielsvn")
        or email.startswith("jaclyn.n.taroni")
        or email.startswith("kurt.wheeler")
        or email.startswith("greenescientist")
        or email.startswith("miserlou")
        or email.startswith("d.prasad")
        or email.endswith("@alexslemonade.org")
        or email == "daniel.himmelstein@gmail.com"
        or email == "dv.prasad991@gmail.com"
    )
def get_ip_location(remote_ip):
    """Best-effort "city, country" lookup for an IP address via ipapi.co.

    Falls back to the raw IP string when the lookup fails for any reason
    (network error, missing keys in the payload, etc.).
    """
    lookup_url = "https://ipapi.co/" + remote_ip + "/json/"
    try:
        payload = requests.get(lookup_url, timeout=10).json()
        return "{0}, {1}".format(payload["city"], payload["country_name"])
    except Exception:
        return remote_ip
<|code_end|>
common/data_refinery_common/migrations/0055_auto_20200529_1230.py
<|code_start|># Generated by Django 2.2.10 on 2020-05-29 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch DatasetAnnotation's base manager to the default manager.

    NOTE(review): presumably so related-object traversal is no longer routed
    through PublicObjectsManager (its filtering semantics are defined
    elsewhere); also re-declares is_public's default of False.
    """

    dependencies = [
        ("data_refinery_common", "0054_datasetannotation"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="datasetannotation", options={"base_manager_name": "objects"},
        ),
        migrations.AlterModelManagers(name="datasetannotation", managers=[],),
        migrations.AlterField(
            model_name="datasetannotation",
            name="is_public",
            field=models.BooleanField(default=False),
        ),
    ]
<|code_end|>
common/data_refinery_common/models/dataset_annotation.py
<|code_start|>from django.contrib.postgres.fields import JSONField
from django.db import models
from django.utils import timezone
from data_refinery_common.models.managers import PublicObjectsManager
class DatasetAnnotation(models.Model):
    """Semi-standard information associated with a Dataset.

    IMPORTANT: This data should not be exposed through an API."""

    class Meta:
        db_table = "dataset_annotations"
        # Default manager is the base manager (changed by migration 0055).
        base_manager_name = "objects"

    # Managers
    objects = models.Manager()
    # NOTE(review): PublicObjectsManager presumably restricts to is_public rows
    # — confirm in data_refinery_common.models.managers.
    public_objects = PublicObjectsManager()

    # Relations
    dataset = models.ForeignKey("Dataset", blank=False, null=False, on_delete=models.CASCADE)

    # Properties
    # Free-form JSON payload; callers elsewhere in this codebase read/write
    # keys such as "start", "ip" and "location".
    data = JSONField(default=dict)

    # Common Properties
    is_public = models.BooleanField(default=False)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps."""
        current_time = timezone.now()
        if not self.pk:
            # Only stamp created_at on first insert (no primary key yet).
            self.created_at = current_time
        self.last_modified = current_time
        return super(DatasetAnnotation, self).save(*args, **kwargs)
<|code_end|>
|
There are two Illumina HiSeq 2000 platforms
<img width="259" alt="Screen_Shot_2020-05-05_at_3_37_37_PM" src="https://user-images.githubusercontent.com/15315514/81108350-93b5aa00-8ee6-11ea-98b1-8c572d59cb4d.png">
Are they the same thing? It's confusing.
| common/data_refinery_common/migrations/0057_fix_platform_accessions_spaces.py
<|code_start|><|code_end|>
| common/data_refinery_common/migrations/0057_fix_platform_accessions_spaces.py
<|code_start|>from django.db import migrations
from django.utils import timezone
from data_refinery_common.utils import get_supported_rnaseq_platforms
def remove_spaces_from_platform_accessions(apps, schema_editor):
    """Data migration: strip spaces out of RNA-seq platform accession codes.

    get_supported_rnaseq_platforms() returns platform names with spaces, but
    our platform accessions should have no spaces; samples carrying the spaced
    form showed up as a second, duplicate platform in the UI.
    """
    Sample = apps.get_model("data_refinery_common", "Sample")
    # get_supported_rnaseq_platforms() returns the platform names with
    # spaces, but our platform accessions should have no spaces
    for bad_accession in get_supported_rnaseq_platforms():
        platform_accession = bad_accession.replace(" ", "")
        bad_samples = Sample.objects.all().filter(platform_accession_code=bad_accession)
        if not bad_samples:
            continue
        # Bulk update; note that .update() bypasses Sample.save(), so
        # last_modified must be bumped explicitly here.
        bad_samples.update(platform_accession_code=platform_accession, last_modified=timezone.now())
        print("Updating platform accession from '%s' to '%s'" % (bad_accession, platform_accession))
class Migration(migrations.Migration):
    """Run the platform-accession cleanup as a one-off data migration."""

    dependencies = [
        ("data_refinery_common", "0056_auto_20200529_1230"),
    ]

    # NOTE(review): no reverse function is provided, so this migration is
    # irreversible.
    operations = [migrations.RunPython(remove_spaces_from_platform_accessions)]
<|code_end|>
|
We should be able to detect more than one p-value column in our Illumina pipeline
### Context
#2275
### Problem or idea
It looks like right now we only detect one p-value column for each Illumina sample (under certain conditions), which causes some samples to fail processing.
### Solution or next step
Modify our p-value detection code to handle multiple p-value columns.
| foreman/data_refinery_foreman/foreman/main.py
<|code_start|>import datetime
import random
import socket
import sys
import time
import traceback
from typing import List
from django.conf import settings
from django.db.models.expressions import Q
from django.utils import timezone
import nomad
from nomad import Nomad
from nomad.api.exceptions import URLNotFoundNomadException
from data_refinery_common.job_lookup import (
SMASHER_JOB_TYPES,
Downloaders,
ProcessorPipeline,
SurveyJobTypes,
does_processor_job_have_samples,
is_file_rnaseq,
)
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputedFile,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
SurveyJob,
SurveyJobKeyValue,
)
from data_refinery_common.performant_pagination.pagination import PerformantPaginator as Paginator
from data_refinery_common.utils import (
get_active_volumes,
get_env_variable,
get_env_variable_gracefully,
get_nomad_jobs_breakdown,
)
logger = get_and_configure_logger(__name__)

# Maximum number of retries, so the number of attempts will be one
# greater than this because of the first attempt
MAX_NUM_RETRIES = 2

# This can be overridden by the env var "MAX_TOTAL_JOBS".
# This number is related to the size of the nomad lead server instance.
# The larger the instance, the more jobs it can store in its memory at once.
# We've found these limits by testing:
# * t2.medium can handle 5000 jobs.
# * m5.xlarge can handle 20000 jobs.
DEFAULT_MAX_JOBS = 5000

# Page size used when paginating through job tables.
PAGE_SIZE = 2000

# The number of jobs running on each currently running volume
VOLUME_WORK_DEPTH = dict()
TIME_OF_LAST_WORK_DEPTH_CHECK = timezone.now() - datetime.timedelta(minutes=10)

# The number of downloader jobs currently in the queue
DOWNLOADER_JOBS_IN_QUEUE = 0
TIME_OF_LAST_DOWNLOADER_JOB_CHECK = timezone.now() - datetime.timedelta(minutes=10)

# The desired number of active + pending jobs on a volume. Downloader jobs
# will be assigned to instances until this limit is reached.
DESIRED_WORK_DEPTH = 500

# This is the absolute max number of downloader jobs that should ever
# be queued across the whole cluster no matter how many nodes we
# have. This is important because too many downloader jobs and we take
# down NCBI.
HARD_MAX_DOWNLOADER_JOBS = 1000

# The minimum amount of time in between each iteration of the main
# loop. We could loop much less frequently than every two minutes if
# the work we do takes longer than 2 minutes, but this will prevent
# excessive spinning.
MIN_LOOP_TIME = datetime.timedelta(seconds=15)

# How frequently we dispatch Janitor jobs and clean unplaceable jobs
# out of the Nomad queue.
JANITOR_DISPATCH_TIME = datetime.timedelta(minutes=30)

# How frequently we clean up the database.
DBCLEAN_TIME = datetime.timedelta(hours=6)

# Setting this to a recent date will prevent the Foreman from queuing/requeuing
# jobs created before this cutoff.
JOB_CREATED_AT_CUTOFF = datetime.datetime(2019, 9, 19, tzinfo=timezone.utc)
def read_config_list(config_file: str) -> List[str]:
    """Read *config_file* and return one stripped string per line.

    Blank lines are preserved as empty strings, matching the previous
    append-in-a-loop behavior.
    """
    with open(config_file) as config_list:
        return [line.strip() for line in config_list]
# Accession lists for subsets of experiments that we want to prioritize.
# Read once at import time from the foreman's config directory.
PEDIATRIC_ACCESSION_LIST = read_config_list("config/pediatric_accessions.txt")
HGU133PLUS2_ACCESSION_LIST = read_config_list("config/hgu133plus2_accessions.txt")


##
# Utilities
##
def handle_repeated_failure(job) -> None:
    """If a job fails too many times, log it and stop retrying.

    `job` appears to be any of the job models handled by this module
    (SurveyJob / DownloaderJob / ProcessorJob); only fields common to all of
    them are touched — confirm before passing anything else.
    """
    # Not strictly retried but will prevent the job from getting
    # retried any more times.
    job.retried = True
    # success may already be False, but if it was a hung or lost job
    # this will ensure it's marked as failed.
    job.success = False
    job.save()

    # At some point this should become more noisy/attention
    # grabbing. However for the time being just logging should be
    # sufficient because all log messages will be closely monitored
    # during early testing stages.
    logger.warn(
        "%s #%d failed %d times!!!",
        job.__class__.__name__,
        job.id,
        MAX_NUM_RETRIES + 1,
        failure_reason=job.failure_reason,
    )
def update_volume_work_depth(window=datetime.timedelta(minutes=2)):
    """Refresh the per-volume work-depth cache from Nomad at most once per *window*.

    When a new job is created our local idea of the work depth is updated, but
    every so often we refresh from Nomad how many jobs were stopped or killed.
    Mutates the module globals VOLUME_WORK_DEPTH and
    TIME_OF_LAST_WORK_DEPTH_CHECK.
    """
    global VOLUME_WORK_DEPTH
    global TIME_OF_LAST_WORK_DEPTH_CHECK
    if timezone.now() - TIME_OF_LAST_WORK_DEPTH_CHECK > window:
        # Reset the work depth dict in case a volume was removed since the last iteration
        VOLUME_WORK_DEPTH = dict()
        breakdown = get_nomad_jobs_breakdown()
        # Loop through all active volumes, which are the keys to the
        # fields aggregated by volume.
        # NOTE(review): assumes that whenever a volume appears in
        # "nomad_pending_jobs_by_volume" it also appears in
        # "nomad_running_jobs_by_volume" — KeyError otherwise; confirm.
        for volume_index in get_active_volumes():
            if volume_index in breakdown["nomad_pending_jobs_by_volume"]:
                VOLUME_WORK_DEPTH[volume_index] = (
                    breakdown["nomad_pending_jobs_by_volume"][volume_index]
                    + breakdown["nomad_running_jobs_by_volume"][volume_index]
                )
            else:
                # There are no nomad jobs currently queued for the
                # volume index, so set its work depth to 0.
                VOLUME_WORK_DEPTH[volume_index] = 0
        TIME_OF_LAST_WORK_DEPTH_CHECK = timezone.now()
def get_emptiest_volume() -> str:
    """Return the index of the volume with the smallest work depth.

    Callers are expected to invoke this only when at least one volume sits
    below DESIRED_WORK_DEPTH; the sentinel "0" is only returned when that
    precondition is violated.
    """
    # Seed the candidates with a sentinel at the threshold so that only
    # volumes strictly below DESIRED_WORK_DEPTH can win. min() returns the
    # first occurrence of the minimum, which matches the original loop's
    # first-strictly-smaller-wins tie-breaking.
    candidates = [("0", DESIRED_WORK_DEPTH)] + list(VOLUME_WORK_DEPTH.items())
    volume_index, _ = min(candidates, key=lambda pair: pair[1])
    return volume_index
##
# Job Prioritization
##
def prioritize_salmon_jobs(jobs: List) -> List:
    """Prioritizes salmon experiments based on how close to completion they are.

    This is because salmon experiments have a final processing step
    that must be performed on all the samples in the experiment, so if
    9/10 samples in an experiment are processed then they can't
    actually be used until that last sample is processed.

    Returns a new list with the prioritized jobs first (most-complete
    experiments leading); `jobs` is mutated in the process.
    """
    # The strategy for doing so is to build a mapping between every
    # salmon job in `jobs` to a priority. This priority will be what
    # percentage of the samples in this experiment have been
    # processed. Once we have that mapping we can sort those jobs by
    # that priority and move them to the front of the list.
    prioritized_jobs = []
    for job in jobs:
        try:
            if type(job) == ProcessorJob and not does_processor_job_have_samples(job):
                continue

            # Salmon jobs are specific to one sample.
            sample = job.get_samples().pop()

            # Skip jobs that aren't for Salmon. Handle both ProcessorJobs and DownloaderJobs.
            if type(job) is ProcessorJob and job.pipeline_applied != ProcessorPipeline.SALMON.value:
                continue
            elif type(job) is DownloaderJob:
                is_salmon_sample = False
                for original_file in sample.original_files.all():
                    if is_file_rnaseq(original_file.filename):
                        is_salmon_sample = True
                if not is_salmon_sample:
                    continue

            # Get a set of unique samples that share at least one
            # experiment with the sample this job is for.
            related_samples = set()
            for experiment in sample.experiments.all():
                for related_sample in experiment.samples.all():
                    related_samples.add(related_sample)

            # We cannot simply filter on is_processed because that field
            # doesn't get set until every sample in an experiment is processed.
            # Instead we are looking for one successful processor job.
            processed_samples = 0
            for related_sample in related_samples:
                original_files = related_sample.original_files
                if original_files.count() == 0:
                    logger.error(
                        "Salmon sample found without any original files!!!", sample=related_sample
                    )
                elif original_files.first().processor_jobs.filter(success=True).count() >= 1:
                    processed_samples += 1

            experiment_completion_percent = processed_samples / len(related_samples)
            prioritized_jobs.append({"job": job, "priority": experiment_completion_percent})
        # Narrowed from a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            logger.debug("Exception caught while prioritizing salmon jobs!", job=job)

    sorted_job_mappings = sorted(prioritized_jobs, reverse=True, key=lambda k: k["priority"])
    sorted_jobs = [job_mapping["job"] for job_mapping in sorted_job_mappings]

    # Remove all the jobs we're moving to the front of the list
    for job in sorted_jobs:
        jobs.remove(job)

    return sorted_jobs + jobs
def prioritize_zebrafish_jobs(jobs: List) -> List:
    """Moves zebrafish jobs to the beginning of the input list.

    Returns a new list with zebrafish jobs first; `jobs` is mutated in the
    process.
    """
    zebrafish_jobs = []
    for job in jobs:
        try:
            if type(job) == ProcessorJob and not does_processor_job_have_samples(job):
                continue

            # There aren't cross-species jobs, so just checking one sample's organism will be sufficient.
            samples = job.get_samples()

            for sample in samples:
                if sample.organism.name == "DANIO_RERIO":
                    zebrafish_jobs.append(job)
                    break
        # Narrowed from a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            logger.debug("Exception caught while prioritizing zebrafish jobs!", job=job)

    # Remove all the jobs we're moving to the front of the list
    for job in zebrafish_jobs:
        jobs.remove(job)

    return zebrafish_jobs + jobs
def prioritize_jobs_by_accession(jobs: List, accession_list: List[str]) -> List:
    """Return *jobs* reordered so that jobs belonging to an experiment in
    *accession_list* come first; the relative order of everything else
    is preserved.
    """
    front_of_queue = []
    for candidate in jobs:
        try:
            if type(candidate) == ProcessorJob and not does_processor_job_have_samples(candidate):
                continue
            # All samples in a job correspond to the same experiment, so
            # the first sample/experiment match settles it for the job.
            is_match = any(
                experiment.accession_code in accession_list
                for sample in candidate.get_samples()
                for experiment in sample.experiments.all()
            )
            if is_match:
                front_of_queue.append(candidate)
        except:
            logger.exception("Exception caught while prioritizing jobs by accession!", job=candidate)
    # Pull the prioritized jobs out of their original positions.
    for candidate in front_of_queue:
        jobs.remove(candidate)
    return front_of_queue + jobs
##
# Downloaders
##
def requeue_downloader_job(last_job: DownloaderJob) -> (bool, str):
    """Queues a new downloader job to retry `last_job`.

    The new downloader job will have num_retries one greater than
    last_job.num_retries, and a larger RAM amount if the previous
    attempt started (and therefore may have been OOM-killed).

    Returns True and the volume index of the downloader job upon successful
    dispatching; False and an empty string otherwise. Jobs that can never
    succeed (no OriginalFile, already processed, dbGaP-controlled) are
    marked `no_retry` so they are not picked up again.
    """
    num_retries = last_job.num_retries + 1
    ram_amount = last_job.ram_amount
    # If there's no start time then it's likely that the instance got
    # cycled which means we didn't get OOM-killed, so we don't need to
    # increase the RAM amount.
    if last_job.start_time and last_job.failure_reason is None:
        if ram_amount == 1024:
            ram_amount = 4096
        elif ram_amount == 4096:
            ram_amount = 16384
    original_file = last_job.original_files.first()
    # A downloader job without an OriginalFile can never succeed, so
    # mark it unretryable rather than looping on it forever.
    if not original_file:
        last_job.no_retry = True
        last_job.success = False
        last_job.failure_reason = (
            "Foreman told to requeue a DownloaderJob without an OriginalFile - why?!"
        )
        last_job.save()
        logger.info(
            "Foreman told to requeue a DownloaderJob without an OriginalFile - why?!",
            last_job=str(last_job),
        )
        return False, ""
    # If the file already has a successful processing run there is
    # nothing left to download.
    if not original_file.needs_processing():
        last_job.no_retry = True
        last_job.success = False
        last_job.failure_reason = "Foreman told to redownload job with prior successful processing."
        last_job.save()
        logger.info(
            "Foreman told to redownload job with prior successful processing.",
            last_job=str(last_job),
        )
        return False, ""
    first_sample = original_file.samples.first()
    # This is a magic string that all the dbGaP studies appear to have;
    # such samples are access-controlled and cannot be downloaded.
    if first_sample and ("in the dbGaP study" in first_sample.title):
        last_job.no_retry = True
        last_job.success = False
        last_job.failure_reason = "Sample is dbGaP access controlled."
        last_job.save()
        logger.info(
            "Avoiding requeuing for DownloaderJob for dbGaP run accession: "
            + str(first_sample.accession_code)
        )
        return False, ""
    new_job = DownloaderJob(
        num_retries=num_retries,
        downloader_task=last_job.downloader_task,
        ram_amount=ram_amount,
        accession_code=last_job.accession_code,
        was_recreated=last_job.was_recreated,
        volume_index=get_emptiest_volume(),
    )
    new_job.save()
    # Point the new job at the same files the old one was downloading.
    for original_file in last_job.original_files.all():
        DownloaderJobOriginalFileAssociation.objects.get_or_create(
            downloader_job=new_job, original_file=original_file
        )
    logger.debug(
        "Requeuing Downloader Job which had ID %d with a new Downloader Job with ID %d.",
        last_job.id,
        new_job.id,
    )
    try:
        if send_job(Downloaders[last_job.downloader_task], job=new_job, is_dispatch=True):
            # Only mark the old job retired once the new one is actually
            # in the queue.
            last_job.retried = True
            last_job.success = False
            last_job.retried_job = new_job
            last_job.save()
        else:
            # Can't communicate with nomad just now, leave the job for a later loop.
            new_job.delete()
            return False, ""
    except:
        logger.error(
            "Failed to requeue Downloader Job which had ID %d with a new Downloader Job with ID %d.",
            last_job.id,
            new_job.id,
        )
        # Can't communicate with nomad just now, leave the job for a later loop.
        new_job.delete()
        return False, ""
    return True, new_job.volume_index
def count_downloader_jobs_in_queue(window=datetime.timedelta(minutes=2)) -> int:
    """Counts how many downloader jobs in the Nomad queue do not have status of 'dead'.

    The count is cached in the module globals and only refreshed when
    the last check is older than *window*, to avoid hammering Nomad on
    every call.
    """
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    global TIME_OF_LAST_DOWNLOADER_JOB_CHECK
    global DOWNLOADER_JOBS_IN_QUEUE
    if timezone.now() - TIME_OF_LAST_DOWNLOADER_JOB_CHECK > window:
        try:
            all_downloader_jobs = nomad_client.jobs.get_jobs(prefix="DOWNLOADER")
            total = 0
            for job in all_downloader_jobs:
                # Parameterized parent jobs summarize their children's
                # states; pending + running are the ones still occupying
                # the queue.
                if job["ParameterizedJob"] and job["JobSummary"].get("Children", None):
                    total = total + job["JobSummary"]["Children"]["Pending"]
                    total = total + job["JobSummary"]["Children"]["Running"]
            DOWNLOADER_JOBS_IN_QUEUE = total
        except:
            # Nomad is down, return an impossibly high number to prevent
            # additional queuing from happening:
            DOWNLOADER_JOBS_IN_QUEUE = sys.maxsize
        TIME_OF_LAST_DOWNLOADER_JOB_CHECK = timezone.now()
    return DOWNLOADER_JOBS_IN_QUEUE
def get_capacity_for_downloader_jobs() -> int:
    """Return the number of downloader jobs we currently have room for.

    Capacity is the total shortfall of per-volume work depth below
    DESIRED_WORK_DEPTH, capped so that the overall queue never exceeds
    HARD_MAX_DOWNLOADER_JOBS.
    """
    update_volume_work_depth()
    # Sum how far below the desired depth each volume currently sits.
    capacity = sum(
        DESIRED_WORK_DEPTH - depth
        for depth in VOLUME_WORK_DEPTH.values()
        if depth < DESIRED_WORK_DEPTH
    )
    queued = count_downloader_jobs_in_queue()
    if queued + capacity >= HARD_MAX_DOWNLOADER_JOBS:
        return HARD_MAX_DOWNLOADER_JOBS - queued
    return capacity
def handle_downloader_jobs(jobs: List[DownloaderJob]) -> None:
    """For each job in *jobs*, either retry it or record its repeated failure.

    No more than the queue capacity (computed once at entry) jobs will be
    retried; jobs past MAX_NUM_RETRIES are handed to
    handle_repeated_failure() instead of being requeued.
    """
    global VOLUME_WORK_DEPTH
    global DOWNLOADER_JOBS_IN_QUEUE
    queue_capacity = get_capacity_for_downloader_jobs()
    jobs_dispatched = 0
    # The enumerate() index previously bound here was never used.
    for job in jobs:
        if jobs_dispatched >= queue_capacity:
            logger.info(
                "We hit the maximum downloader jobs / capacity ceiling, so we're not handling any more downloader jobs now."
            )
            return
        if job.num_retries < MAX_NUM_RETRIES:
            requeue_success, dispatched_volume = requeue_downloader_job(job)
            if requeue_success:
                jobs_dispatched = jobs_dispatched + 1
                # Track the work we just added so capacity checks stay
                # accurate without re-querying Nomad.
                VOLUME_WORK_DEPTH[dispatched_volume] += 1
                DOWNLOADER_JOBS_IN_QUEUE += 1
        else:
            handle_repeated_failure(job)
def retry_failed_downloader_jobs() -> None:
    """Handle downloader jobs that were marked as a failure.

    Walks the failed jobs oldest-first, one page at a time, requeuing
    pages for as long as the downloader queue has capacity.
    """
    failed_jobs = (
        DownloaderJob.failed_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )
    # NOTE: the Nomad client this function used to construct here was
    # never used (capacity is computed without it), so it was removed.
    queue_capacity = get_capacity_for_downloader_jobs()
    paginator = Paginator(failed_jobs, PAGE_SIZE, "created_at")
    page = paginator.page()
    page_count = 0
    if queue_capacity <= 0:
        logger.info(
            "Not handling failed (explicitly-marked-as-failure) downloader jobs "
            "because there is no capacity for them."
        )
    while queue_capacity > 0:
        logger.info(
            "Handling page %d of failed (explicitly-marked-as-failure) downloader jobs!", page_count
        )
        handle_downloader_jobs(page.object_list)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            # Capacity may have changed after dispatching a page.
            queue_capacity = get_capacity_for_downloader_jobs()
        else:
            break
def retry_hung_downloader_jobs() -> None:
    """Retry downloader jobs that were started but never finished.

    A job is considered hung when Nomad reports it as not running (or
    has no record of it) while our database shows no end_time.
    """
    potentially_hung_jobs = (
        DownloaderJob.hung_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_downloader_jobs()
    if queue_capacity <= 0:
        # NOTE(review): this message says "failed" but this function handles
        # hung jobs -- looks copy-pasted from retry_failed_downloader_jobs.
        logger.info(
            "Not handling failed (explicitly-marked-as-failure) downloader jobs "
            "because there is no capacity for them."
        )
    paginator = Paginator(potentially_hung_jobs, PAGE_SIZE, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        hung_jobs = []
        for job in page.object_list:
            try:
                # Ask Nomad whether the job is actually still running.
                job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                if job_status != "running":
                    # Make sure it didn't finish since our original query.
                    job.refresh_from_db()
                    if job.end_time is None:
                        hung_jobs.append(job)
            except URLNotFoundNomadException:
                # Nomad has no record of the job at all, so it is dead.
                hung_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception(
                    "Couldn't query Nomad about Downloader Job.", downloader_job=job.id
                )
        if hung_jobs:
            logger.info(
                "Handling page %d of hung (started-but-never-finished) downloader jobs!",
                page_count,
                jobs_count=len(hung_jobs),
            )
            handle_downloader_jobs(hung_jobs)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_downloader_jobs()
        else:
            break
def retry_lost_downloader_jobs() -> None:
    """Retry downloader jobs that went too long without being started.

    Jobs with a nomad_job_id whose Nomad status is neither pending nor
    running get requeued; jobs with no nomad_job_id at all are dispatched
    directly (they were created but never sent to Nomad).

    Idea: at some point this function could integrate with the spot
    instances to determine if jobs are hanging due to a lack of
    instances. A naive time-based implementation like this could end
    up retrying every single queued job if there were a long period
    during which the price of spot instance is higher than our bid
    price.
    """
    global VOLUME_WORK_DEPTH
    global DOWNLOADER_JOBS_IN_QUEUE
    potentially_lost_jobs = (
        DownloaderJob.lost_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_downloader_jobs()
    if queue_capacity <= 0:
        # NOTE(review): this message says "failed" but this function handles
        # lost jobs -- looks copy-pasted from retry_failed_downloader_jobs.
        logger.info(
            "Not handling failed (explicitly-marked-as-failure) downloader jobs "
            "because there is no capacity for them."
        )
    paginator = Paginator(potentially_lost_jobs, PAGE_SIZE, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        lost_jobs = []
        jobs_queued_from_this_page = 0
        for job in page.object_list:
            try:
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                    # If the job is still pending, then it makes sense that it
                    # hasn't started and if it's running then it may not have
                    # been able to mark the job record as started yet.
                    if job_status != "pending" and job_status != "running":
                        logger.debug(
                            (
                                "Determined that a downloader job needs to be requeued because its"
                                " Nomad Job's status is: %s."
                            ),
                            job_status,
                            job_id=job.id,
                        )
                        lost_jobs.append(job)
                elif jobs_queued_from_this_page < queue_capacity:
                    # The job never got put in the Nomad queue, no
                    # need to recreate it, we just gotta queue it up!
                    job.volume_index = get_emptiest_volume()
                    job.save()
                    send_job(Downloaders[job.downloader_task], job=job, is_dispatch=True)
                    jobs_queued_from_this_page += 1
                    # Track the added work so capacity stays accurate.
                    VOLUME_WORK_DEPTH[job.volume_index] += 1
            except socket.timeout:
                logger.info("Timeout connecting to Nomad - is Nomad down?", job_id=job.id)
            except URLNotFoundNomadException:
                # Nomad has no record of the job: it must be requeued.
                logger.debug(
                    (
                        "Determined that a downloader job needs to be requeued because "
                        "querying for its Nomad job failed: "
                    ),
                    job_id=job.id,
                )
                lost_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception(
                    "Couldn't query Nomad about Downloader Job.", downloader_job=job.id
                )
        if lost_jobs and get_capacity_for_downloader_jobs() > 0:
            logger.info(
                "Handling page %d of lost (never-started) downloader jobs!",
                page_count,
                len_jobs=len(lost_jobs),
            )
            handle_downloader_jobs(lost_jobs)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_downloader_jobs()
        else:
            break
##
# Processors
##
def requeue_processor_job(last_job: ProcessorJob) -> None:
    """Queues a new processor job to retry `last_job`.

    The new processor job will have num_retries one greater than
    last_job.num_retries, and more RAM for RAM-sensitive pipelines when
    the previous attempt actually started (i.e. may have been
    OOM-killed rather than lost to instance cycling).
    """
    num_retries = last_job.num_retries + 1
    # The Salmon pipeline is quite RAM-sensitive.
    # Try it again with an increased RAM amount, if possible.
    new_ram_amount = last_job.ram_amount
    # If there's no start time then it's likely that the instance got
    # cycled which means we didn't get OOM-killed, so we don't need to
    # increase the RAM amount.
    if last_job.start_time:
        # These initial values are set in common/job_lookup.py:determine_ram_amount
        if (
            last_job.pipeline_applied == "SALMON"
            or last_job.pipeline_applied == "TXIMPORT"
            or last_job.pipeline_applied.startswith("TRANSCRIPTOME")
        ):
            if new_ram_amount == 4096:
                new_ram_amount = 8192
            # BUGFIX: this used to be a second `if`, so a 4096 job cascaded
            # straight to 12288 in a single retry. It is now `elif`, matching
            # the rest of the ladder, so RAM ramps one step at a time.
            elif new_ram_amount == 8192:
                new_ram_amount = 12288
            elif new_ram_amount == 12288:
                new_ram_amount = 16384
            elif new_ram_amount == 16384:
                new_ram_amount = 32768
            elif new_ram_amount == 32768:
                new_ram_amount = 65536
        # The AFFY pipeline is somewhat RAM-sensitive.
        # Also NO_OP can fail and be retried, so we want to attempt ramping up ram.
        # Try it again with an increased RAM amount, if possible.
        elif last_job.pipeline_applied == "AFFY_TO_PCL" or last_job.pipeline_applied == "NO_OP":
            if new_ram_amount == 2048:
                new_ram_amount = 4096
            elif new_ram_amount == 4096:
                new_ram_amount = 8192
            elif new_ram_amount == 8192:
                new_ram_amount = 32768
    volume_index = last_job.volume_index
    # Make sure volume_index is set to something, unless it's a
    # smasher job type because the smasher instance doesn't have a
    # volume_index.
    if (not volume_index or volume_index == "-1") and ProcessorPipeline[
        last_job.pipeline_applied
    ] not in SMASHER_JOB_TYPES:
        active_volumes = get_active_volumes()
        if len(active_volumes) < 1 or not settings.RUNNING_IN_CLOUD:
            logger.debug("No active volumes to requeue processor job.", job_id=last_job.id)
            return
        else:
            volume_index = random.choice(list(active_volumes))
    new_job = ProcessorJob(
        num_retries=num_retries,
        pipeline_applied=last_job.pipeline_applied,
        ram_amount=new_ram_amount,
        volume_index=volume_index,
    )
    new_job.save()
    # Carry the old job's file and dataset associations over to the retry.
    for original_file in last_job.original_files.all():
        ProcessorJobOriginalFileAssociation.objects.get_or_create(
            processor_job=new_job, original_file=original_file
        )
    for dataset in last_job.datasets.all():
        ProcessorJobDatasetAssociation.objects.get_or_create(processor_job=new_job, dataset=dataset)
    try:
        logger.debug(
            "Requeuing Processor Job which had ID %d with a new Processor Job with ID %d.",
            last_job.id,
            new_job.id,
        )
        if send_job(ProcessorPipeline[last_job.pipeline_applied], job=new_job, is_dispatch=True):
            # Only mark the old job retired once the new one is queued.
            last_job.retried = True
            last_job.success = False
            last_job.retried_job = new_job
            last_job.save()
        else:
            # Can't communicate with nomad just now, leave the job for a later loop.
            new_job.delete()
    except:
        logger.warn(
            "Failed to requeue Processor Job which had ID %d with a new Processor Job with ID %d.",
            last_job.id,
            new_job.id,
            exc_info=1,
        )
        # Can't communicate with nomad just now, leave the job for a later loop.
        new_job.delete()
def get_capacity_for_processor_jobs(nomad_client) -> int:
    """Return how many more processor jobs the Nomad queue can take.

    Computed as MAX_TOTAL_JOBS minus the number of jobs Nomad currently
    knows about (may be negative when the queue is over-full). The
    return annotation was fixed from `bool` to `int`.
    """
    # Maximum number of total jobs running at a time.
    # We do this now rather than import time for testing purposes.
    MAX_TOTAL_JOBS = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))
    len_all_jobs = len(nomad_client.jobs.get_jobs())
    return MAX_TOTAL_JOBS - len_all_jobs
def handle_processor_jobs(
    jobs: List[ProcessorJob], queue_capacity: int = None, ignore_ceiling=False
) -> None:
    """For each job in *jobs*, either retry it or record its repeated failure.

    No more than *queue_capacity* jobs will be retried unless
    *ignore_ceiling* is set; jobs past MAX_NUM_RETRIES are handed to
    handle_repeated_failure() instead of being requeued.
    """
    # Maximum number of total jobs running at a time.
    # We do this now rather than import time for testing purposes.
    if queue_capacity is None:
        queue_capacity = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))
    # We want zebrafish data first, then hgu133plus2, then data
    # related to pediatric cancer, then to finish salmon experiments
    # that are close to completion.
    # Each function moves the jobs it prioritizes to the front of the
    # list, so apply them in backwards order.
    # (Prioritization is currently disabled.)
    # jobs = prioritize_salmon_jobs(jobs)
    # jobs = prioritize_jobs_by_accession(jobs, PEDIATRIC_ACCESSION_LIST)
    # jobs = prioritize_jobs_by_accession(jobs, HGU133PLUS2_ACCESSION_LIST)
    # jobs = prioritize_zebrafish_jobs(jobs)
    jobs_dispatched = 0
    # The enumerate() index previously bound here was never used.
    for job in jobs:
        if not ignore_ceiling and jobs_dispatched >= queue_capacity:
            logger.info(
                "We hit the maximum total jobs ceiling, so we're not handling any more processor jobs now."
            )
            return
        if job.num_retries < MAX_NUM_RETRIES:
            requeue_processor_job(job)
            jobs_dispatched = jobs_dispatched + 1
        else:
            handle_repeated_failure(job)
def retry_failed_processor_jobs() -> None:
    """Handle processor jobs that were marked as a failure.

    Ignores Janitor jobs since they are queued every half hour anyway."""
    try:
        active_volumes = get_active_volumes()
    except:
        # BUGFIX: this used to `pass`, which left `active_volumes` unbound
        # and raised a NameError in the query below. If we cannot reach
        # Nomad now then we can wait until a later loop.
        return
    failed_jobs = (
        ProcessorJob.failed_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .filter(Q(volume_index__isnull=True) | Q(volume_index__in=active_volumes))
        .exclude(pipeline_applied="JANITOR")
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_processor_jobs(nomad_client)
    paginator = Paginator(failed_jobs, 200, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        logger.info(
            "Handling page %d of failed (explicitly-marked-as-failure) processor jobs!", page_count
        )
        handle_processor_jobs(page.object_list, queue_capacity)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            # Capacity may have changed after dispatching a page.
            queue_capacity = get_capacity_for_processor_jobs(nomad_client)
        else:
            break
def retry_hung_processor_jobs() -> None:
    """Retry processor jobs that were started but never finished.

    Ignores Janitor jobs since they are queued every half hour anyway."""
    try:
        active_volumes = get_active_volumes()
    except:
        # BUGFIX: this used to `pass`, which left `active_volumes` unbound
        # and raised a NameError in the query below. If we cannot reach
        # Nomad now then we can wait until a later loop.
        return
    potentially_hung_jobs = (
        ProcessorJob.hung_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .filter(Q(volume_index__isnull=True) | Q(volume_index__in=active_volumes))
        .exclude(pipeline_applied="JANITOR")
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_processor_jobs(nomad_client)
    paginator = Paginator(potentially_hung_jobs, 200, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        hung_jobs = []
        for job in page.object_list:
            try:
                # Ask Nomad whether the job is actually still running.
                job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                if job_status != "running":
                    # Make sure it didn't finish since our original query.
                    job.refresh_from_db()
                    if job.end_time is None:
                        hung_jobs.append(job)
            except URLNotFoundNomadException:
                hung_jobs.append(job)
            except TypeError:
                # Almost certainly a python-nomad issue:
                # File "/usr/local/lib/python3.5/dist-packages/nomad/api/job.py", line 63, in get_job
                #   return self.request(id, method="get").json()
                # File "/usr/local/lib/python3.5/dist-packages/nomad/api/base.py", line 74, in request
                #   endpoint = self._endpoint_builder(self.ENDPOINT, *args)
                # File "/usr/local/lib/python3.5/dist-packages/nomad/api/base.py", line 28, in _endpoint_builder
                #   u = "/".join(args)
                # TypeError: sequence item 1: expected str instance, NoneType found
                logger.info("Couldn't query Nomad about Processor Job.", processor_job=job.id)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception("Couldn't query Nomad about Processor Job.", processor_job=job.id)
        if hung_jobs:
            logger.info(
                "Handling hung page %d of (started-but-never-finished) processor jobs!",
                page_count,
                len_jobs=len(hung_jobs),
            )
            handle_processor_jobs(hung_jobs, queue_capacity)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_processor_jobs(nomad_client)
        else:
            break
def retry_lost_processor_jobs() -> None:
    """Retry processor jobs which never even got started for too long.

    Ignores Janitor jobs since they are queued every half hour anyway."""
    try:
        active_volumes = get_active_volumes()
    except:
        # BUGFIX: this used to `pass`, which left `active_volumes` unbound
        # and raised a NameError in the query below. If we cannot reach
        # Nomad now then we can wait until a later loop.
        return
    potentially_lost_jobs = (
        ProcessorJob.lost_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .filter(Q(volume_index__isnull=True) | Q(volume_index__in=active_volumes))
        .exclude(pipeline_applied="JANITOR")
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=5)
    queue_capacity = get_capacity_for_processor_jobs(nomad_client)
    paginator = Paginator(potentially_lost_jobs, 200, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        lost_jobs = []
        for job in page.object_list:
            try:
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                    # If the job is still pending, then it makes sense that it
                    # hasn't started and if it's running then it may not have
                    # been able to mark the job record as started yet.
                    if job_status != "pending" and job_status != "running":
                        logger.debug(
                            (
                                "Determined that a processor job needs to be requeued because its"
                                " Nomad Job's status is: %s."
                            ),
                            job_status,
                            job_id=job.id,
                        )
                        lost_jobs.append(job)
                else:
                    # If there is no nomad_job_id field set, we could be
                    # in the small window where the job was created but
                    # hasn't yet gotten a chance to be queued.
                    # If this job really should be restarted we'll get it in the next loop.
                    if timezone.now() - job.created_at > MIN_LOOP_TIME:
                        lost_jobs.append(job)
            except URLNotFoundNomadException:
                logger.debug(
                    (
                        "Determined that a processor job needs to be requeued because "
                        "querying for its Nomad job failed: "
                    ),
                    job_id=job.id,
                )
                lost_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception("Couldn't query Nomad about Processor Job.", processor_job=job.id)
        if lost_jobs:
            logger.info(
                "Handling lost page %d of (never-started) processor jobs!",
                page_count,
                len_jobs=len(lost_jobs),
            )
            handle_processor_jobs(lost_jobs, queue_capacity)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_processor_jobs(nomad_client)
        else:
            break
##
# Surveyors
##
def requeue_survey_job(last_job: SurveyJob) -> bool:
    """Queues a new survey job to retry `last_job`.

    The new survey job will have num_retries one greater than
    last_job.num_retries, with RAM ramped up by retry count.

    Returns True if the new job was dispatched, False otherwise.
    (Previously this always returned True, even when dispatch failed.)
    """
    num_retries = last_job.num_retries + 1
    new_job = SurveyJob(num_retries=num_retries, source_type=last_job.source_type)
    if new_job.num_retries == 1:
        new_job.ram_amount = 4096
    elif new_job.num_retries in [2, 3]:
        new_job.ram_amount = 16384
    else:
        new_job.ram_amount = 256
    new_job.save()
    # Carry the old job's key/value parameters over to the retry.
    keyvalues = SurveyJobKeyValue.objects.filter(survey_job=last_job)
    for keyvalue in keyvalues:
        SurveyJobKeyValue.objects.get_or_create(
            survey_job=new_job, key=keyvalue.key, value=keyvalue.value,
        )
    logger.debug(
        "Requeuing SurveyJob which had ID %d with a new SurveyJob with ID %d.",
        last_job.id,
        new_job.id,
    )
    try:
        if send_job(SurveyJobTypes.SURVEYOR, job=new_job, is_dispatch=True):
            last_job.retried = True
            last_job.success = False
            last_job.retried_job = new_job
            last_job.save()
        else:
            # Can't communicate with nomad just now, leave the job for a later loop.
            new_job.delete()
            return False
    except:
        # Typo fix: the message used to say "Surevey".
        logger.error(
            "Failed to requeue Survey Job which had ID %d with a new Survey Job with ID %d.",
            last_job.id,
            new_job.id,
        )
        # Can't communicate with nomad just now, leave the job for a later loop.
        new_job.delete()
        return False
    return True
def get_capacity_for_survey_jobs(nomad_client) -> int:
    """Return how many more survey jobs the Nomad queue can take.

    Computed as MAX_TOTAL_JOBS minus the number of jobs Nomad currently
    knows about (may be negative when the queue is over-full). The
    return annotation was fixed from `bool` to `int`.
    """
    # Maximum number of total jobs running at a time.
    # We do this now rather than import time for testing purposes.
    MAX_TOTAL_JOBS = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))
    len_all_jobs = len(nomad_client.jobs.get_jobs())
    return MAX_TOTAL_JOBS - len_all_jobs
def handle_survey_jobs(jobs: List[SurveyJob], queue_capacity: int = None) -> None:
    """For each job in *jobs*, either retry it or record its repeated failure.

    No more than *queue_capacity* jobs will be retried; jobs past
    MAX_NUM_RETRIES are handed to handle_repeated_failure() instead.
    """
    # Maximum number of total jobs running at a time.
    # We do this now rather than import time for testing purposes.
    if queue_capacity is None:
        queue_capacity = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))
    jobs_dispatched = 0
    # The enumerate() index previously bound here was never used.
    for job in jobs:
        if jobs_dispatched >= queue_capacity:
            logger.info(
                "We hit the maximum total jobs ceiling, so we're not handling any more survey jobs now."
            )
            return
        if job.num_retries < MAX_NUM_RETRIES:
            requeue_survey_job(job)
            jobs_dispatched = jobs_dispatched + 1
        else:
            handle_repeated_failure(job)
def retry_failed_survey_jobs() -> None:
    """Requeue survey jobs that were explicitly marked as failures."""
    failed_jobs = SurveyJob.failed_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF).order_by(
        "pk"
    )
    nomad_client = Nomad(
        get_env_variable("NOMAD_HOST"),
        port=int(get_env_variable("NOMAD_PORT", "4646")),
        timeout=30,
    )
    capacity = get_capacity_for_survey_jobs(nomad_client)
    pages = Paginator(failed_jobs, 200)
    current_page = pages.page()
    page_number = 0
    # Requeue page by page until the queue fills up or we run out of jobs.
    while capacity > 0:
        logger.info(
            "Handling page %d of failed (explicitly-marked-as-failure) survey jobs!", page_number
        )
        handle_survey_jobs(current_page.object_list, capacity)
        if not current_page.has_next():
            break
        current_page = pages.page(current_page.next_page_number())
        page_number += 1
        capacity = get_capacity_for_survey_jobs(nomad_client)
def retry_hung_survey_jobs() -> None:
    """Retry survey jobs that were started but never finished.

    A job is considered hung when Nomad reports it as not running (or
    has no record of it) while our database shows no end_time.
    """
    potentially_hung_jobs = SurveyJob.hung_objects.filter(
        created_at__gt=JOB_CREATED_AT_CUTOFF
    ).order_by("pk")
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_survey_jobs(nomad_client)
    paginator = Paginator(potentially_hung_jobs, 200)
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        hung_jobs = []
        for job in page.object_list:
            try:
                # Surveyor jobs didn't always have nomad_job_ids. If they
                # don't have one then by this point they've definitely died.
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                else:
                    job_status = "absent"
                if job_status != "running":
                    # Make sure it didn't finish since our original query.
                    job.refresh_from_db()
                    if job.end_time is None:
                        hung_jobs.append(job)
            except URLNotFoundNomadException:
                # Nomad has no record of the job at all, so it is dead.
                hung_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception("Couldn't query Nomad about SurveyJob Job.", survey_job=job.id)
        if hung_jobs:
            logger.info(
                "Handling page %d of hung (started-but-never-finished) survey jobs!",
                page_count,
                len_jobs=len(hung_jobs),
            )
            handle_survey_jobs(hung_jobs, queue_capacity)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_survey_jobs(nomad_client)
        else:
            break
def retry_lost_survey_jobs() -> None:
    """Retry survey jobs which never even got started for too long.

    A job is considered lost when Nomad reports its status as neither
    pending nor running, or has no record of it at all.
    """
    potentially_lost_jobs = SurveyJob.lost_objects.filter(
        created_at__gt=JOB_CREATED_AT_CUTOFF
    ).order_by("pk")
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_survey_jobs(nomad_client)
    paginator = Paginator(potentially_lost_jobs, 200)
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        lost_jobs = []
        for job in page.object_list:
            try:
                # Surveyor jobs didn't always have nomad_job_ids. If they
                # don't have one then by this point they've definitely died.
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                else:
                    job_status = "absent"
                # If the job is still pending, then it makes sense that it
                # hasn't started and if it's running then it may not have
                # been able to mark the job record as started yet.
                if job_status != "pending" and job_status != "running":
                    logger.debug(
                        (
                            "Determined that a survey job needs to be requeued because its"
                            " Nomad Job's status is: %s."
                        ),
                        job_status,
                        job_id=job.id,
                    )
                    lost_jobs.append(job)
            except URLNotFoundNomadException:
                # Nomad has no record of the job: it must be requeued.
                logger.debug(
                    (
                        "Determined that a survey job needs to be requeued because "
                        "querying for its Nomad job failed."
                    ),
                    job_id=job.id,
                )
                lost_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                # NOTE(review): this message says "Processor Job" but this
                # function handles survey jobs -- looks copy-pasted.
                logger.exception("Couldn't query Nomad about Processor Job.", survey_job=job.id)
        if lost_jobs:
            logger.info(
                "Handling page %d of lost (never-started) survey jobs!",
                page_count,
                len_jobs=len(lost_jobs),
            )
            handle_survey_jobs(lost_jobs, queue_capacity)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_survey_jobs(nomad_client)
        else:
            break
##
# Janitor
##
def send_janitor_jobs():
    """Dispatch a Janitor job for each active volume in the cluster,
    plus one with no volume index for the smasher instance."""
    try:
        active_volumes = get_active_volumes()
    except:
        # BUGFIX: this used to `pass`, which left `active_volumes` unbound
        # and raised a NameError at the .add() below. If we cannot reach
        # Nomad now then we can wait until a later loop.
        return
    # Clean up the smasher, which has no volume index of its own:
    active_volumes.add(None)
    for volume_index in active_volumes:
        new_job = ProcessorJob(
            num_retries=0, pipeline_applied="JANITOR", ram_amount=2048, volume_index=volume_index
        )
        new_job.save()
        logger.info("Sending Janitor with index: ", job_id=new_job.id, index=volume_index)
        try:
            send_job(ProcessorPipeline["JANITOR"], job=new_job, is_dispatch=True)
        except Exception:
            # If we can't dispatch this job, something else has gone wrong.
            continue
##
# Handling of node cycling
##
def cleanup_the_queue():
    """This cleans up any jobs which cannot currently be queued.

    We often have more volumes than instances because we have enough
    volumes for the scenario where the entire cluster is using the
    smallest instance type, however that doesn't happen very
    often. Therefore it's possible for some volumes to not be mounted,
    which means that jobs which are constrained to run on instances
    with those volumes cannot be placed and just clog up the queue.
    Therefore we clear out jobs of that type every once in a while so
    our queue is dedicated to jobs that can actually be placed.
    """
    logger.info("Removing all jobs from Nomad queue whose volumes are not mounted.")
    DOWNLOADER = "DOWNLOADER"
    # Smasher and QN Reference jobs aren't tied to a specific EBS volume.
    indexed_job_types = [
        pipeline.value for pipeline in ProcessorPipeline if pipeline not in SMASHER_JOB_TYPES
    ]
    # Special case for downloader jobs because they only have one
    # nomad job type for all downloader tasks.
    indexed_job_types.append(DOWNLOADER)
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
    try:
        active_volumes = get_active_volumes()
        jobs = nomad_client.jobs.get_jobs()
    except:
        # If we cannot reach Nomad now then we can wait until a later loop.
        logger.warn("Couldn't query Nomad about current jobs.", exc_info=1)
        return
    logger.info(
        (
            "These are the currently active volumes. Jobs for "
            "other volumes will now be removed from the Nomad queue."
        ),
        active_volumes=active_volumes,
    )
    num_jobs_killed = 0
    for job in jobs:
        # Skip over the Parameterized Jobs because we need those to
        # always be running.
        if "ParameterizedJob" not in job or job["ParameterizedJob"]:
            continue
        # We're only concerned with jobs that have to be tied to a volume index.
        # Dispatched child jobs carry a ParentID; parentless jobs are skipped.
        if "ParentID" not in job:
            continue
        # ensure the job is one of the indexed_job_types
        job_type = None
        for pipeline in indexed_job_types:
            if job["ParentID"].startswith(pipeline):
                job_type = pipeline
                break
        if not job_type:
            continue
        # If this job has an index, then its ParentID will
        # have the pattern of <job-type>_<index>_<RAM-amount>
        # and we want to check the value of <index>:
        split_parent_id = job["ParentID"].split("_")
        if len(split_parent_id) < 2:
            continue
        else:
            # Second-to-last segment is the volume index.
            index = split_parent_id[-2]
        if index not in active_volumes:
            # The index for this job isn't currently mounted, kill the job
            # `num_retries` will be decremented when the job receives the SIGKILL
            try:
                nomad_client.job.deregister_job(job["ID"], purge=True)
                logger.info(
                    "Foreman Killed nomad job because it had a volume that was not active",
                    nomad_job_id=job["ID"],
                    job_type=job_type,
                )
                num_jobs_killed += 1
            except:
                logger.exception(
                    "Could not remove Nomad job from the Nomad queue.",
                    nomad_job_id=job["ID"],
                    job_type=job_type,
                )
                # If we can't do this for some reason, we'll get it next loop.
    logger.info("Removed %d jobs from the Nomad queue.", num_jobs_killed)
def clean_database():
    """ Removes duplicated objects that may have appeared through race, OOM, bugs, etc.
    See: https://github.com/AlexsLemonade/refinebio/issues/1183
    """
    # Hide smashable files that never made it to S3 (no bucket/key recorded).
    unsynced_files = ComputedFile.objects.filter(s3_bucket=None, s3_key=None, is_smashable=True)
    logger.info("Cleaning unsynced files!", num_to_clean=unsynced_files.count())

    # Save each object individually (no bulk_update) so that any side effects
    # of ComputedFile.save() are applied to every file.
    for unsynced_file in unsynced_files:
        unsynced_file.is_public = False
        unsynced_file.save()

    logger.info("Cleaned files!")
##
# Main loop
##
def monitor_jobs():
    """Main Foreman thread that helps manage the Nomad job queue.

    Will find jobs that failed, hung, or got lost and requeue them.
    Also will queue up Janitor jobs regularly to free up disk space.
    Also cleans jobs out of the Nomad queue which cannot be queued
    because the volume containing the job's data isn't mounted.

    It does so on a loop forever that won't spin faster than
    MIN_LOOP_TIME, but it may spin slower than that.
    """
    last_janitorial_time = timezone.now()
    last_dbclean_time = timezone.now()

    while True:
        # Perform two heartbeats, one for the logs and one for Monit:
        logger.info("The Foreman's heart is beating, but he does not feel.")

        # Write the health file for Monit to check
        now_secs = int(time.time())
        with open("/tmp/foreman_last_time", "w") as timefile:
            timefile.write(str(now_secs))

        start_time = timezone.now()

        # Requeue jobs of each failure class for each job type.
        # The order of processor -> downloader -> surveyor is intentional.
        # Processors go first so we process data sitting on disk.
        # Downloaders go first so we actually queue up the jobs in the database.
        # Surveyors go last so we don't end up with tons and tons of unqueued jobs.
        requeuing_functions_in_order = [
            retry_failed_processor_jobs,
            retry_hung_processor_jobs,
            retry_lost_processor_jobs,
            retry_failed_downloader_jobs,
            retry_hung_downloader_jobs,
            retry_lost_downloader_jobs,
            retry_failed_survey_jobs,
            retry_hung_survey_jobs,
            retry_lost_survey_jobs,
        ]

        for function in requeuing_functions_in_order:
            try:
                function()
            except Exception:
                # A failure in one requeuing pass must not abort the others,
                # so log it and move on to the next function.
                logger.error("Caught exception in %s: ", function.__name__)
                traceback.print_exc(chain=False)

        # Periodic maintenance only makes sense against real infrastructure.
        if settings.RUNNING_IN_CLOUD:
            if timezone.now() - last_janitorial_time > JANITOR_DISPATCH_TIME:
                send_janitor_jobs()
                cleanup_the_queue()
                last_janitorial_time = timezone.now()

            if timezone.now() - last_dbclean_time > DBCLEAN_TIME:
                clean_database()
                last_dbclean_time = timezone.now()

        # Throttle the loop: never iterate faster than MIN_LOOP_TIME.
        loop_time = timezone.now() - start_time
        if loop_time < MIN_LOOP_TIME:
            remaining_time = MIN_LOOP_TIME - loop_time
            if remaining_time.seconds > 0:
                time.sleep(remaining_time.seconds)
<|code_end|>
foreman/data_refinery_foreman/surveyor/geo.py
<|code_start|>import shutil
from re import sub
from typing import Dict, List
import dateutil.parser
import GEOparse
from data_refinery_common.job_lookup import Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
Experiment,
ExperimentAnnotation,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
OriginalFile,
OriginalFileSampleAssociation,
Sample,
SampleAnnotation,
SurveyJobKeyValue,
)
from data_refinery_common.utils import (
FileUtils,
get_normalized_platform,
get_readable_affymetrix_names,
get_supported_microarray_platforms,
get_supported_rnaseq_platforms,
)
from data_refinery_foreman.surveyor import harmony, utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor
logger = get_and_configure_logger(__name__)
GEOparse.logger.set_verbosity("WARN")
UNKNOWN = "UNKNOWN"
class GeoSurveyor(ExternalSourceSurveyor):
    """Surveys NCBI GEO for data.

    Implements the ExternalSourceSurveyor interface.
    """

    def source_type(self):
        # Identifies which downloader type handles files surveyed here.
        return Downloaders.GEO.value

    def get_temp_path(self):
        # Scratch directory for GEOparse downloads, unique per survey job.
        return "/tmp/" + str(self.survey_job.id) + "/"

    def set_platform_properties(
        self, sample_object: Sample, sample_metadata: Dict, gse: GEOparse.GSM
    ) -> Sample:
        """Sets platform-related properties on `sample_object`.

        Uses metadata from `gse` to populate platform_name,
        platform_accession_code, and technology on `sample_object`.
        Returns the (mutated but unsaved) `sample_object`.
        """
        # Determine platform information
        external_accession = get_normalized_platform(gse.metadata.get("platform_id", [UNKNOWN])[0])
        if external_accession == UNKNOWN:
            sample_object.platform_accession_code = UNKNOWN
            sample_object.platform_name = UNKNOWN
            sample_object.manufacturer = UNKNOWN
            # If this sample is Affy, we potentially can extract the
            # platform information from the .CEL file. If it's not we
            # can't do anything. Therefore assume the technology is
            # microarray when we have no platform information.
            sample_object.technology = "MICROARRAY"
            return sample_object

        platform_accession_code = UNKNOWN
        # Fetch the GPL (platform) record from GEO to learn its title.
        gpl = GEOparse.get_GEO(
            external_accession, destdir=self.get_temp_path(), how="brief", silent=True
        )
        platform_title = gpl.metadata.get("title", [UNKNOWN])[0]

        # Check if this is a supported microarray platform.
        for platform in get_supported_microarray_platforms():
            if platform["external_accession"] == external_accession:
                platform_accession_code = platform["platform_accession"]

        if platform_accession_code != UNKNOWN:
            # It's a supported microarray platform.

            # We are using the brain array package as the platform accession code,
            # so, for instance, GPL3213 becomes 'chicken'.
            sample_object.platform_accession_code = platform_accession_code
            sample_object.technology = "MICROARRAY"
            try:
                # Related: https://github.com/AlexsLemonade/refinebio/issues/354
                # If it's Affy we can get a readable name:
                sample_object.platform_name = get_readable_affymetrix_names()[
                    platform_accession_code
                ]
                sample_object.manufacturer = "AFFYMETRIX"

                # Sometimes Affymetrix samples have weird channel
                # protocol metadata, so if we find that it's
                # Affymetrix return it now. Example: GSE113945
                return sample_object
            except KeyError:
                # Otherwise we'll use what we've got.
                sample_object.platform_name = platform_title

            # Determine manufacturer
            platform = sample_object.pretty_platform.upper()
            if "AGILENT" in platform:
                sample_object.manufacturer = "AGILENT"
            elif "ILLUMINA" in platform or "NEXTSEQ" in platform:
                sample_object.manufacturer = "ILLUMINA"
            elif "AFFYMETRIX" in platform:
                sample_object.manufacturer = "AFFYMETRIX"
            else:
                sample_object.manufacturer = UNKNOWN

            return sample_object

        # Check to see if this is a supported RNASeq technology:

        # GEO RNASeq platform titles often have organisms appended to
        # an otherwise recognizable platform. The list of supported
        # RNASeq platforms isn't long, so see if any of them are
        # contained within what GEO gave us.
        # Example: GSE69572 has a platform title of:
        # 'Illumina Genome Analyzer IIx (Glycine max)'
        # Which should really just be 'Illumina Genome Analyzer IIx'
        # because RNASeq platforms are organism agnostic.  However,
        # the platforms 'Illumina Genome Analyzer' and 'Illumina
        # Genome Analyzer II' would also be matched, so make sure that
        # the longest platform names are tested first:
        sorted_platform_list = get_supported_rnaseq_platforms().copy()
        sorted_platform_list.sort(key=len, reverse=True)

        for platform in sorted_platform_list:
            if platform.upper() in platform_title.upper():
                sample_object.technology = "RNA-SEQ"
                sample_object.platform_name = platform
                # We just use RNASeq platform titles as accessions
                sample_object.platform_accession_code = platform

                if "ILLUMINA" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "ILLUMINA"
                elif "NEXTSEQ" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "NEXTSEQ"
                elif "ION TORRENT" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "ION_TORRENT"
                else:
                    sample_object.manufacturer = UNKNOWN

                return sample_object

        # If we've made it this far, we don't know what this platform
        # is, therefore we can't know what its technology is. What we
        # do know is what GEO said was it's platform's accession and
        # title are, and that it's unsupported.
        sample_object.platform_name = platform_title
        sample_object.platform_accession_code = external_accession
        sample_object.technology = UNKNOWN
        sample_object.manufacturer = UNKNOWN

        return sample_object

    def get_miniml_url(self, experiment_accession_code):
        """ Build the URL for the MINiML files for this accession code.
        ex:
        'GSE68061' -> 'ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE68nnn/GSE68061/miniml/GSE68061_family.xml.tgz'
        """
        geo = experiment_accession_code.upper()
        geotype = geo[:3]
        # GEO groups series into "nnn" range subdirectories, e.g. GSE68nnn.
        range_subdir = sub(r"\d{1,3}$", "nnn", geo)

        min_url_template = (
            "ftp://ftp.ncbi.nlm.nih.gov/geo/" "series/{range_subdir}/{record}/miniml/{record_file}"
        )
        min_url = min_url_template.format(
            range_subdir=range_subdir, record=geo, record_file="%s_family.xml.tgz" % geo
        )

        return min_url

    @staticmethod
    def get_sample_protocol_info(sample_metadata, sample_accession_code):
        """Collect the protocol-related metadata fields for a sample into a dict."""
        protocol_info = dict()
        if "extract_protocol_ch1" in sample_metadata:
            protocol_info["Extraction protocol"] = sample_metadata["extract_protocol_ch1"]
        if "label_protocol_ch1" in sample_metadata:
            protocol_info["Label protocol"] = sample_metadata["label_protocol_ch1"]
        if "hyb_protocol" in sample_metadata:
            protocol_info["Hybridization protocol"] = sample_metadata["hyb_protocol"]
        if "scan_protocol" in sample_metadata:
            protocol_info["Scan protocol"] = sample_metadata["scan_protocol"]
        if "data_processing" in sample_metadata:
            protocol_info["Data processing"] = sample_metadata["data_processing"]

        protocol_info["Reference"] = (
            "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + sample_accession_code
        )
        return protocol_info

    @staticmethod
    def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):
        """Applies the harmonized metadata to `sample`"""
        # Each harmonized key maps directly onto a Sample attribute.
        for key, value in harmonized_metadata.items():
            setattr(sample, key, value)

    @staticmethod
    def _apply_metadata_to_experiment(experiment: Experiment, gse):
        """ Gets the metadata out of gse and applies it to the experiment"""
        experiment.source_url = (
            "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + experiment.accession_code
        )
        experiment.source_database = "GEO"
        experiment.title = gse.metadata.get("title", [""])[0]
        experiment.description = gse.metadata.get("summary", [""])[0]

        # Source doesn't provide time information, assume midnight.
        submission_date = gse.metadata["submission_date"][0] + " 00:00:00 UTC"
        experiment.source_first_published = dateutil.parser.parse(submission_date)
        last_updated_date = gse.metadata["last_update_date"][0] + " 00:00:00 UTC"
        experiment.source_last_updated = dateutil.parser.parse(last_updated_date)

        unique_institutions = list(set(gse.metadata["contact_institute"]))
        experiment.submitter_institution = ", ".join(unique_institutions)
        experiment.pubmed_id = gse.metadata.get("pubmed_id", [""])[0]

        # Scrape publication title and authorship from Pubmed
        if experiment.pubmed_id:
            pubmed_metadata = utils.get_title_and_authors_for_pubmed_id(experiment.pubmed_id)
            experiment.publication_title = pubmed_metadata[0]
            experiment.publication_authors = pubmed_metadata[1]

    def create_experiment_and_samples_from_api(
        self, experiment_accession_code
    ) -> (Experiment, List[Sample]):
        """ The main surveyor - find the Experiment and Samples from NCBI GEO.

        Uses the GEOParse library, for which docs can be found here: https://geoparse.readthedocs.io/en/latest/usage.html#working-with-geo-objects
        """
        # Cleaning up is tracked here: https://github.com/guma44/GEOparse/issues/41
        gse = GEOparse.get_GEO(
            experiment_accession_code, destdir=self.get_temp_path(), how="brief", silent=True
        )
        preprocessed_samples = harmony.preprocess_geo(gse.gsms.items())
        harmonized_samples = harmony.harmonize(preprocessed_samples)

        # Create the experiment object
        try:
            experiment_object = Experiment.objects.get(accession_code=experiment_accession_code)
            logger.debug(
                "Experiment %s already exists, skipping object creation.",
                experiment_accession_code,
                survey_job=self.survey_job.id,
            )
        except Experiment.DoesNotExist:
            experiment_object = Experiment()
            experiment_object.accession_code = experiment_accession_code
            GeoSurveyor._apply_metadata_to_experiment(experiment_object, gse)
            experiment_object.save()

            experiment_annotation = ExperimentAnnotation()
            experiment_annotation.data = gse.metadata
            experiment_annotation.experiment = experiment_object
            experiment_annotation.is_ccdl = False
            experiment_annotation.save()

        # Okay, here's the situation!
        # Sometimes, samples have a direct single representation for themselves.
        # Othertimes, there is a single file with references to every sample in it.
        created_samples = []
        for sample_accession_code, sample in gse.gsms.items():
            try:
                sample_object = Sample.objects.get(accession_code=sample_accession_code)
                logger.debug(
                    "Sample %s from experiment %s already exists, skipping object creation.",
                    sample_accession_code,
                    experiment_object.accession_code,
                    survey_job=self.survey_job.id,
                )

                # Associate it with the experiment, but since it
                # already exists it already has original files
                # associated with it and it's already been downloaded,
                # so don't add it to created_samples.
                ExperimentSampleAssociation.objects.get_or_create(
                    experiment=experiment_object, sample=sample_object
                )
                ExperimentOrganismAssociation.objects.get_or_create(
                    experiment=experiment_object, organism=sample_object.organism
                )
            except Sample.DoesNotExist:
                organism = Organism.get_object_for_name(sample.metadata["organism_ch1"][0].upper())

                sample_object = Sample()
                sample_object.source_database = "GEO"
                sample_object.accession_code = sample_accession_code
                sample_object.organism = organism

                # If data processing step, it isn't raw.
                sample_object.has_raw = not sample.metadata.get("data_processing", None)

                ExperimentOrganismAssociation.objects.get_or_create(
                    experiment=experiment_object, organism=organism
                )
                sample_object.title = sample.metadata["title"][0]

                self.set_platform_properties(sample_object, sample.metadata, gse)

                GeoSurveyor._apply_harmonized_metadata_to_sample(
                    sample_object, harmonized_samples[sample_object.title]
                )

                # Sample-level protocol_info
                sample_object.protocol_info = self.get_sample_protocol_info(
                    sample.metadata, sample_accession_code
                )

                sample_object.save()
                logger.debug("Created Sample: " + str(sample_object))

                sample_annotation = SampleAnnotation()
                sample_annotation.sample = sample_object
                sample_annotation.data = sample.metadata
                sample_annotation.is_ccdl = False
                sample_annotation.save()

                sample_supplements = sample.metadata.get("supplementary_file", [])
                for supplementary_file_url in sample_supplements:

                    # Why do they give us this?
                    if supplementary_file_url == "NONE":
                        break

                    # We never want these!
                    if "idat.gz" in supplementary_file_url.lower():
                        continue
                    if "chp.gz" in supplementary_file_url.lower():
                        continue
                    if "ndf.gz" in supplementary_file_url.lower():
                        continue
                    if "pos.gz" in supplementary_file_url.lower():
                        continue
                    if "pair.gz" in supplementary_file_url.lower():
                        continue
                    if "gff.gz" in supplementary_file_url.lower():
                        continue

                    # Sometimes, we are lied to about the data processing step.
                    lower_file_url = supplementary_file_url.lower()
                    if (
                        ".cel" in lower_file_url
                        or ("_non_normalized.txt" in lower_file_url)
                        or ("_non-normalized.txt" in lower_file_url)
                        or ("-non-normalized.txt" in lower_file_url)
                        or ("-non_normalized.txt" in lower_file_url)
                    ):
                        sample_object.has_raw = True
                        sample_object.save()

                    # filename and source_filename are the same for these
                    filename = FileUtils.get_filename(supplementary_file_url)
                    original_file = OriginalFile.objects.get_or_create(
                        source_url=supplementary_file_url,
                        filename=filename,
                        source_filename=filename,
                        has_raw=sample_object.has_raw,
                        is_archive=FileUtils.is_archive(filename),
                    )[0]

                    logger.debug("Created OriginalFile: " + str(original_file))

                    original_file_sample_association = OriginalFileSampleAssociation.objects.get_or_create(
                        original_file=original_file, sample=sample_object
                    )

                    if original_file.is_affy_data():
                        # Only Affymetrix Microarrays produce .CEL files
                        sample_object.technology = "MICROARRAY"
                        sample_object.manufacturer = "AFFYMETRIX"
                        sample_object.save()

                # It's okay to survey RNA-Seq samples from GEO, but we
                # don't actually want to download/process any RNA-Seq
                # data unless it comes from SRA.
                if sample_object.technology != "RNA-SEQ":
                    created_samples.append(sample_object)

                # Now that we've determined the technology at the
                # sample level, we can set it at the experiment level,
                # just gotta make sure to only do it once. There can
                # be more than one technology, this should be changed
                # as part of:
                # https://github.com/AlexsLemonade/refinebio/issues/1099
                if not experiment_object.technology:
                    experiment_object.technology = sample_object.technology
                    experiment_object.save()

                ExperimentSampleAssociation.objects.get_or_create(
                    experiment=experiment_object, sample=sample_object
                )

        # These supplementary files _may-or-may-not_ contain the type of raw data we can process.
        for experiment_supplement_url in gse.metadata.get("supplementary_file", []):

            # filename and source_filename are the same for these
            filename = experiment_supplement_url.split("/")[-1]
            original_file = OriginalFile.objects.get_or_create(
                source_url=experiment_supplement_url,
                filename=filename,
                source_filename=filename,
                has_raw=sample_object.has_raw,
                is_archive=True,
            )[0]

            logger.debug("Created OriginalFile: " + str(original_file))

            lower_supplement_url = experiment_supplement_url.lower()
            if (
                ("_non_normalized.txt" in lower_supplement_url)
                or ("_non-normalized.txt" in lower_supplement_url)
                or ("-non-normalized.txt" in lower_supplement_url)
                or ("-non_normalized.txt" in lower_supplement_url)
            ):
                for sample_object in created_samples:
                    sample_object.has_raw = True
                    sample_object.save()

                    OriginalFileSampleAssociation.objects.get_or_create(
                        sample=sample_object, original_file=original_file
                    )

            # Delete this Original file if it isn't being used.
            if (
                OriginalFileSampleAssociation.objects.filter(original_file=original_file).count()
                == 0
            ):
                original_file.delete()

        # These are the Miniml/Soft/Matrix URLs that are always(?) provided.
        # GEO describes different types of data formatting as "families"
        family_url = self.get_miniml_url(experiment_accession_code)
        miniml_original_file = OriginalFile.objects.get_or_create(
            source_url=family_url,
            source_filename=family_url.split("/")[-1],
            has_raw=sample_object.has_raw,
            is_archive=True,
        )[0]
        for sample_object in created_samples:
            # We don't need a .txt if we have a .CEL
            if sample_object.has_raw:
                continue
            OriginalFileSampleAssociation.objects.get_or_create(
                sample=sample_object, original_file=miniml_original_file
            )

        # Delete this Original file if it isn't being used.
        if (
            OriginalFileSampleAssociation.objects.filter(original_file=miniml_original_file).count()
            == 0
        ):
            miniml_original_file.delete()

        # Trash the temp path
        try:
            shutil.rmtree(self.get_temp_path())
        except Exception:
            # There was a problem during surveying so this didn't get created.
            # It's not a big deal.
            pass

        return experiment_object, created_samples

    def discover_experiment_and_samples(self) -> (Experiment, List[Sample]):
        """ Dispatches the surveyor, returns the results """

        experiment_accession_code = SurveyJobKeyValue.objects.get(
            survey_job_id=self.survey_job.id, key__exact="experiment_accession_code"
        ).value

        logger.debug(
            "Surveying experiment with accession code: %s.",
            experiment_accession_code,
            survey_job=self.survey_job.id,
        )

        experiment, samples = self.create_experiment_and_samples_from_api(experiment_accession_code)

        return experiment, samples
<|code_end|>
workers/data_refinery_workers/processors/illumina.py
<|code_start|>import csv
import multiprocessing
import os
import re
import subprocess
from typing import Dict
from django.utils import timezone
import numpy as np
import pandas as pd
from data_refinery_common.job_lookup import PipelineEnum, ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Pipeline,
ProcessorJob,
ProcessorJobOriginalFileAssociation,
Sample,
SampleAnnotation,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
logger = get_and_configure_logger(__name__)
def _prepare_files(job_context: Dict) -> Dict:
    """Adds the keys "input_file_path" and "output_file_path" to
    job_context so everything is prepared for processing.

    Also creates the job's work directory and writes a ".sanitized"
    copy of the original file with comment/metadata lines stripped.
    """
    # All files for the job are in the same directory.
    job_context["work_dir"] = (
        LOCAL_ROOT_DIR + "/" + "processor_job_" + str(job_context["job_id"]) + "/"
    )
    os.makedirs(job_context["work_dir"], exist_ok=True)

    original_file = job_context["original_files"][0]
    sanitized_filename = original_file.absolute_file_path.split("/")[-1] + ".sanitized"
    job_context["input_file_path"] = job_context["work_dir"] + sanitized_filename
    new_filename = original_file.absolute_file_path.split("/")[-1].replace(".txt", ".PCL")
    job_context["output_file_path"] = job_context["work_dir"] + new_filename

    # Sanitize this file so R doesn't choke.
    # Some have comments, some have non-comment-comments.
    with open(original_file.absolute_file_path, "r") as file_input:
        with open(job_context["input_file_path"], "w") as file_output:
            for line in file_input:
                # Keep only plausible data rows: non-empty, tab-delimited,
                # and not starting with any known comment/metadata marker.
                if (
                    "#" not in line
                    and line.strip() != ""
                    and line != "\n"
                    and "\t" in line
                    and line[0:3].upper() != "GSM"
                    and line[0] != "'"
                    and line[0] != '"'
                    and line[0] != "!"
                    and line[0] != "/"
                    and line[0] != "<"
                    and line[0] != "\t"
                ):
                    file_output.write(line)

    return job_context
def _detect_columns(job_context: Dict) -> Dict:
    """ Detect which columns match to which inputs.

    Related: https://github.com/AlexsLemonade/refinebio/issues/86#issuecomment-379308817

    We need to find:

        First column should be ID_REF or PROBE_ID and the type should be string.
        Detection Pval column
        Expression column (contains sample title and NOT 'BEAD')

    Header examples:
        ['ID_REF', 'LV-C&si-Control-1', 'Detection Pval',
        'LV-C&si-Control-2', 'Detection Pval', 'LV-C&si-Control-3', 'Detection
        Pval', 'LV-C&si-EZH2-1', 'Detection Pval', 'LV-C&si-EZH2-2', 'Detection
        Pval', 'LV-C&si-EZH2-3', 'Detection Pval', 'LV-EZH2&si-EZH2-1',
        'Detection Pval', 'LV-EZH2&si-EZH2-2', 'Detection Pval', 'LV-EZH2&si-
        EZH2-3', 'Detection Pval', 'LV-T350A&si-EZH2-1', 'Detection Pval', 'LV-
        T350A&si-EZH2-2', 'Detection Pval', 'LV-T350A&si-EZH2-3', 'Detection
        Pval']

    Adds the following keys to job_context:
        columnIds: the identifiers of columns which contain expression data
        probeId: which is the value of the column containing the probe identifiers.
        detectionPval: a string which identifies Pvalue columns
    """
    try:
        input_file = job_context["input_file_path"]
        headers = None
        # Only the first row (the header row) of the TSV is needed.
        with open(input_file, "r") as tsv_in:
            tsv_in = csv.reader(tsv_in, delimiter="\t")
            for row in tsv_in:
                headers = row
                break

        # Ex GSE45331_non-normalized.txt
        predicted_header = 0
        if headers[0].upper() in ["TARGETID", "TARGET_ID"]:
            predicted_header = 1

        # First the probe ID column
        if headers[predicted_header].upper() not in [
            "ID_REF",
            "PROBE_ID",
            "IDREF",
            "PROBEID",
            "REF_ID",
            "REFID",
            "IDPROBE",
            "ID_PROBE",
        ]:
            job_context["job"].failure_reason = (
                "Could not find any ID column in headers "
                + str(headers)
                + " for file "
                + job_context["input_file_path"]
            )
            job_context["success"] = False
            return job_context
        else:
            job_context["probeId"] = headers[predicted_header]

        # Then the detection Pvalue string, which is always(?) some form of 'Detection Pval'
        for header in headers:
            # check if header contains something like "detection pval"
            pvalue_header = re.match(r"(detection)(\W?)(pval\w*)", header, re.IGNORECASE)
            if pvalue_header:
                job_context["detectionPval"] = pvalue_header.string
                break
        else:
            # for/else: no header matched the P-value pattern at all.
            job_context["job"].failure_reason = "Could not detect PValue column!"
            job_context["success"] = False
            job_context["job"].no_retry = True
            return job_context

        # Then, finally, create an absolutely bonkers regular expression
        # which will explicitly hit on any sample which contains a sample
        # ID _and_ ignores the magical word 'BEAD', etc. Great!
        # columnIds is built as a comma-separated string of 1-based offsets.
        column_ids = ""
        for sample in job_context["samples"]:
            for offset, header in enumerate(headers, start=1):
                if sample.title == header:
                    column_ids = column_ids + str(offset) + ","
                    continue

                # Sometimes the title might actually be in the description field.
                # To find this, look in all the related SampleAnnotations.
                # Since there are multiple annotations, we need to break early before continuing.
                # Related: https://github.com/AlexsLemonade/refinebio/issues/499
                continue_me = False
                for annotation in sample.sampleannotation_set.filter(is_ccdl=False):
                    try:
                        if annotation.data.get("description", "")[0] == header:
                            column_ids = column_ids + str(offset) + ","
                            continue_me = True
                            break
                    except Exception:
                        pass
                if continue_me:
                    # Treat the header as the real title, as we will need it later.
                    sample.title = header
                    sample.save()
                    continue

                if header.upper().replace(" ", "_") == "RAW_VALUE":
                    column_ids = column_ids + str(offset) + ","
                    continue

                if (
                    sample.title.upper() in header.upper()
                    and "BEAD" not in header.upper()
                    and "NARRAYS" not in header.upper()
                    and "ARRAY_STDEV" not in header.upper()
                    and "PVAL" not in header.upper().replace(" ", "").replace("_", "")
                ):
                    column_ids = column_ids + str(offset) + ","
                    continue

        for offset, header in enumerate(headers, start=1):
            if "AVG_Signal" in header:
                column_ids = column_ids + str(offset) + ","
                continue

        # Remove the trailing comma
        column_ids = column_ids[:-1]
        job_context["columnIds"] = column_ids
    except Exception as e:
        job_context["job"].failure_reason = str(e)
        job_context["success"] = False
        logger.exception(
            "Failed to extract columns in " + job_context["input_file_path"], exception=str(e)
        )
        job_context["job"].no_retry = True
        return job_context

    return job_context
def _detect_platform(job_context: Dict) -> Dict:
    """
    Determine the platform/database to process this sample with.
    They often provide something like "V2" or "V 2", but we don't trust them so we detect it ourselves.

    Related: https://github.com/AlexsLemonade/refinebio/issues/232
    """
    # Candidate Bioconductor annotation packages per organism.
    all_databases = {
        "HOMO_SAPIENS": [
            "illuminaHumanv1",
            "illuminaHumanv2",
            "illuminaHumanv3",
            "illuminaHumanv4",
        ],
        "MUS_MUSCULUS": ["illuminaMousev1", "illuminaMousev1p1", "illuminaMousev2",],
        "RATTUS_NORVEGICUS": ["illuminaRatv1"],
    }

    sample0 = job_context["samples"][0]
    databases = all_databases[sample0.organism.name]

    # Loop over all of the possible platforms and find the one with the best match.
    highest = 0.0
    high_mapped_percent = 0.0
    high_db = None
    for platform in databases:
        try:
            # detect_database.R prints two lines: a detection score and
            # a mapped percentage.
            result = subprocess.check_output(
                [
                    "/usr/bin/Rscript",
                    "--vanilla",
                    "/home/user/data_refinery_workers/processors/detect_database.R",
                    "--platform",
                    platform,
                    "--inputFile",
                    job_context["input_file_path"],
                    "--column",
                    job_context["probeId"],
                ]
            )

            results = result.decode().split("\n")
            cleaned_result = float(results[0].strip())

            if cleaned_result > highest:
                highest = cleaned_result
                high_db = platform
                high_mapped_percent = float(results[1].strip())

        except Exception as e:
            # A failed detection for one platform shouldn't stop us from
            # trying the remaining candidates.
            logger.exception(e, processor_job_id=job_context["job"].id)
            continue

    # Record our sample detection outputs for every sample.
    for sample in job_context["samples"]:
        sa = SampleAnnotation()
        sa.sample = sample
        sa.is_ccdl = True
        sa.data = {
            "detected_platform": high_db,
            "detection_percentage": highest,
            "mapped_percentage": high_mapped_percent,
        }
        sa.save()

    # If the match is over 75%, record this and process it on that platform.
    if high_mapped_percent > 75.0:
        job_context["platform"] = high_db
    # The match percentage is too low - send this to the no-opper instead.
    else:
        logger.info("Match percentage too low, NO_OP'ing and aborting.", job=job_context["job_id"])

        # NOTE(review): nomad_job_id is not set here; presumably send_job()
        # fills it in when the job is dispatched — confirm, since some job
        # types (e.g. compendia) have been observed with a NULL nomad_job_id.
        processor_job = ProcessorJob()
        processor_job.pipeline_applied = "NO_OP"
        processor_job.volume_index = job_context["job"].volume_index
        processor_job.ram_amount = job_context["job"].ram_amount
        processor_job.save()

        assoc = ProcessorJobOriginalFileAssociation()
        assoc.original_file = job_context["original_files"][0]
        assoc.processor_job = processor_job
        assoc.save()

        try:
            send_job(ProcessorPipeline.NO_OP, processor_job)
        except Exception as e:
            # Nomad dispatch error, likely during local test.
            logger.error(e, job=processor_job)

        job_context["abort"] = True

    return job_context
def _run_illumina(job_context: Dict) -> Dict:
    """Processes an input TXT file to an output PCL file using a custom R script.

    Expects a job_context which has been pre-populated with inputs, outputs
    and the column identifiers which the R script needs for processing.

    On failure, sets job_context["success"] = False and records the error in
    the job's failure_reason; on success, records the command and timings.
    """
    try:
        job_context["time_start"] = timezone.now()

        formatted_command = [
            "/usr/bin/Rscript",
            "--vanilla",
            "/home/user/data_refinery_workers/processors/illumina.R",
            "--probeId",
            job_context["probeId"],
            "--expression",
            job_context["columnIds"],
            "--detection",
            job_context["detectionPval"],
            "--platform",
            job_context["platform"],
            "--inputFile",
            job_context["input_file_path"],
            "--outputFile",
            job_context["output_file_path"],
            "--cores",
            str(multiprocessing.cpu_count()),
        ]

        subprocess.check_output(formatted_command)

        # Stored so it can be recorded on the ComputationalResult later.
        job_context["formatted_command"] = " ".join(formatted_command)

        job_context["time_end"] = timezone.now()

    except Exception as e:
        error_template = (
            "Encountered error in R code while running illumina.R"
            " pipeline during processing of {0}: {1}"
        )
        error_message = error_template.format(job_context["input_file_path"], str(e))
        logger.error(error_message, processor_job=job_context["job_id"])
        job_context["job"].failure_reason = error_message
        job_context["success"] = False

    return job_context
def _create_result_objects(job_context: Dict) -> Dict:
    """ Create the ComputationalResult objects after a Scan run is complete """

    result = ComputationalResult()
    result.commands.append(job_context["formatted_command"])
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context["time_start"]
    result.time_end = job_context["time_end"]
    try:
        processor_key = "ILLUMINA_SCAN"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)

    result.save()
    job_context["pipeline"].steps.append(result.id)

    # Split the result into smashable subfiles, one per sample column.
    big_tsv = job_context["output_file_path"]
    data = pd.read_csv(big_tsv, sep="\t", header=0, index_col=0)
    individual_files = []
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        # Strip characters that are unsafe in filenames.
        filename = (
            frame.columns.values[0].replace("&", "").replace("*", "").replace(";", "") + ".tsv"
        )
        frame_path = job_context["work_dir"] + filename
        frame.to_csv(frame_path, sep="\t", encoding="utf-8")

        # This needs to be the same as the ones in the job context!
        try:
            sample = job_context["samples"].get(title=frame.columns.values[0])
        except Sample.DoesNotExist:
            logger.error(
                "Could not find sample for column while splitting Illumina file.",
                title=frame.columns.values[0],
                processor_job=job_context["job_id"],
                file_path=big_tsv,
            )
            continue

        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = frame_path.split("/")[-1]
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context["computed_files"].append(computed_file)

        SampleResultAssociation.objects.get_or_create(sample=sample, result=result)

        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample, computed_file=computed_file
        )

        individual_files.append(computed_file)

    logger.debug("Created %s", result)
    job_context["success"] = True
    job_context["individual_files"] = individual_files
    job_context["result"] = result

    return job_context
def illumina_to_pcl(job_id: int) -> None:
    """Entry point: run the Illumina processor pipeline for a ProcessorJob id."""
    # Ordered processing steps; run_pipeline threads the job_context dict
    # through each of them in turn.
    steps = [
        utils.start_job,
        _prepare_files,
        _detect_columns,
        _detect_platform,
        _run_illumina,
        _create_result_objects,
        utils.end_job,
    ]
    initial_context = {"job_id": job_id, "pipeline": Pipeline(name=PipelineEnum.ILLUMINA.value)}
    return utils.run_pipeline(initial_context, steps)
<|code_end|>
| foreman/data_refinery_foreman/foreman/main.py
<|code_start|>import datetime
import random
import socket
import sys
import time
import traceback
from typing import List
from django.conf import settings
from django.db.models.expressions import Q
from django.utils import timezone
import nomad
from nomad import Nomad
from nomad.api.exceptions import URLNotFoundNomadException
from data_refinery_common.job_lookup import (
SMASHER_JOB_TYPES,
Downloaders,
ProcessorPipeline,
SurveyJobTypes,
does_processor_job_have_samples,
is_file_rnaseq,
)
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputedFile,
DownloaderJob,
DownloaderJobOriginalFileAssociation,
ProcessorJob,
ProcessorJobDatasetAssociation,
ProcessorJobOriginalFileAssociation,
SurveyJob,
SurveyJobKeyValue,
)
from data_refinery_common.performant_pagination.pagination import PerformantPaginator as Paginator
from data_refinery_common.utils import (
get_active_volumes,
get_env_variable,
get_env_variable_gracefully,
get_nomad_jobs_breakdown,
)
logger = get_and_configure_logger(__name__)

# Maximum number of retries, so the number of attempts will be one
# greater than this because of the first attempt
MAX_NUM_RETRIES = 2

# This can be overwritten by the env var "MAX_TOTAL_JOBS".
# This number is related to the size of the nomad lead server instance.
# The larger the instance, the more jobs it can store in its memory at once.
# We've found these limits by testing:
# * t2.medium can handle 5000 jobs.
# * m5.xlarge can handle 20000 jobs.
DEFAULT_MAX_JOBS = 5000

# How many job records to pull per page when paginating retry queries.
PAGE_SIZE = 2000

# The number of jobs running on each currently running volume.
# Maps volume index (str) -> pending + running Nomad job count.
VOLUME_WORK_DEPTH = dict()
# Initialized in the past so the first loop iteration always refreshes.
TIME_OF_LAST_WORK_DEPTH_CHECK = timezone.now() - datetime.timedelta(minutes=10)

# The number of downloader jobs currently in the queue
DOWNLOADER_JOBS_IN_QUEUE = 0
# Initialized in the past so the first loop iteration always refreshes.
TIME_OF_LAST_DOWNLOADER_JOB_CHECK = timezone.now() - datetime.timedelta(minutes=10)

# The desired number of active + pending jobs on a volume. Downloader jobs
# will be assigned to instances until this limit is reached.
DESIRED_WORK_DEPTH = 500

# This is the absolute max number of downloader jobs that should ever
# be queued across the whole cluster no matter how many nodes we
# have. This is important because too many downloader jobs and we take
# down NCBI.
HARD_MAX_DOWNLOADER_JOBS = 1000

# The minimum amount of time in between each iteration of the main
# loop. We could loop much less frequently than every two minutes if
# the work we do takes longer than 2 minutes, but this will prevent
# excessive spinning.
MIN_LOOP_TIME = datetime.timedelta(seconds=15)

# How frequently we dispatch Janitor jobs and clean unplaceable jobs
# out of the Nomad queue.
JANITOR_DISPATCH_TIME = datetime.timedelta(minutes=30)

# How frequently we clean up the database.
DBCLEAN_TIME = datetime.timedelta(hours=6)

# Setting this to a recent date will prevent the Foreman from queuing/requeuing
# jobs created before this cutoff.
JOB_CREATED_AT_CUTOFF = datetime.datetime(2019, 9, 19, tzinfo=timezone.utc)
def read_config_list(config_file: str) -> List[str]:
    """Read *config_file* and return one stripped string per line."""
    with open(config_file) as lines:
        return [line.strip() for line in lines]
# These are lists of accessions that are for
# subsets of experiments that we want to prioritize.
# Paths are relative to the foreman's working directory.
PEDIATRIC_ACCESSION_LIST = read_config_list("config/pediatric_accessions.txt")
HGU133PLUS2_ACCESSION_LIST = read_config_list("config/hgu133plus2_accessions.txt")

##
# Utilities
##
def handle_repeated_failure(job) -> None:
    """If a job fails too many times, log it and stop retrying."""
    # Marking the job as retried prevents the foreman from ever picking
    # it up again; marking it unsuccessful covers hung/lost jobs that
    # were never explicitly failed.
    job.retried = True
    job.success = False
    job.save()
    # At some point this should become more noisy/attention
    # grabbing. However for the time being just logging should be
    # sufficient because all log messages will be closely monitored
    # during early testing stages.
    logger.warn(
        "%s #%d failed %d times!!!",
        type(job).__name__,
        job.id,
        MAX_NUM_RETRIES + 1,
        failure_reason=job.failure_reason,
    )
def update_volume_work_depth(window=datetime.timedelta(minutes=2)):
    """When a new job is created our local idea of the work depth is updated, but every so often
    we refresh from Nomad how many jobs were stopped or killed"""
    global VOLUME_WORK_DEPTH
    global TIME_OF_LAST_WORK_DEPTH_CHECK
    # Only query Nomad at most once per `window`; between refreshes the
    # module-level counters are incremented by the dispatch code paths.
    if timezone.now() - TIME_OF_LAST_WORK_DEPTH_CHECK > window:
        # Reset the work depth dict in case a volume was removed since the last iteration
        VOLUME_WORK_DEPTH = dict()
        breakdown = get_nomad_jobs_breakdown()
        # Loop through all active volumes, which are the keys to the
        # fields aggregated by volume
        for volume_index in get_active_volumes():
            if volume_index in breakdown["nomad_pending_jobs_by_volume"]:
                # Work depth for a volume = pending + running Nomad jobs.
                VOLUME_WORK_DEPTH[volume_index] = (
                    breakdown["nomad_pending_jobs_by_volume"][volume_index]
                    + breakdown["nomad_running_jobs_by_volume"][volume_index]
                )
            else:
                # There are no nomad jobs currently queued for the
                # volume index, so set its work depth is 0.
                VOLUME_WORK_DEPTH[volume_index] = 0
        TIME_OF_LAST_WORK_DEPTH_CHECK = timezone.now()
def get_emptiest_volume() -> str:
    """Return the index of the volume with the smallest work depth."""
    # The fallback index "0" should never actually be returned, because this
    # should only be called when one or more volumes have a work depth
    # smaller than DESIRED_WORK_DEPTH.
    best_index = "0"
    best_depth = DESIRED_WORK_DEPTH
    for candidate_index, candidate_depth in VOLUME_WORK_DEPTH.items():
        if candidate_depth < best_depth:
            best_index = candidate_index
            best_depth = candidate_depth
    return best_index
##
# Job Prioritization
##
def prioritize_salmon_jobs(jobs: List) -> List:
    """Prioritizes salmon experiments based on how close to completion they are.

    This is because salmon experiments have a final processing step
    that must be performed on all the samples in the experiment, so if
    9/10 samples in an experiment are processed then they can't
    actually be used until that last sample is processed.
    """
    # The strategy for doing so is to build a mapping between every
    # salmon job in `jobs` to a priority. This priority will be what
    # percentage of the samples in this experiment have been
    # processed. Once we have that mapping we can sort those jobs by
    # that priority and move them to the front of the list.
    prioritized_jobs = []
    for job in jobs:
        try:
            if type(job) == ProcessorJob and not does_processor_job_have_samples(job):
                continue

            # Salmon jobs are specific to one sample.
            sample = job.get_samples().pop()

            # Skip jobs that aren't for Salmon. Handle both ProcessorJobs and DownloaderJobs.
            if type(job) is ProcessorJob and job.pipeline_applied != ProcessorPipeline.SALMON.value:
                continue
            elif type(job) is DownloaderJob:
                is_salmon_sample = False
                for original_file in sample.original_files.all():
                    if is_file_rnaseq(original_file.filename):
                        is_salmon_sample = True
                if not is_salmon_sample:
                    continue

            # Get a set of unique samples that share at least one
            # experiment with the sample this job is for.
            related_samples = set()
            for experiment in sample.experiments.all():
                for related_sample in experiment.samples.all():
                    related_samples.add(related_sample)

            # We cannot simply filter on is_processed because that field
            # doesn't get set until every sample in an experiment is processed.
            # Instead we are looking for one successful processor job.
            processed_samples = 0
            for related_sample in related_samples:
                original_files = related_sample.original_files
                if original_files.count() == 0:
                    logger.error(
                        "Salmon sample found without any original files!!!", sample=related_sample
                    )
                elif original_files.first().processor_jobs.filter(success=True).count() >= 1:
                    processed_samples += 1

            experiment_completion_percent = processed_samples / len(related_samples)
            prioritized_jobs.append({"job": job, "priority": experiment_completion_percent})
        except Exception:
            # Prioritization is best-effort: a job we cannot inspect simply
            # keeps its original queue position. Catch Exception (not a bare
            # except) so KeyboardInterrupt/SystemExit still propagate.
            logger.debug("Exception caught while prioritizing salmon jobs!", job=job)

    sorted_job_mappings = sorted(prioritized_jobs, reverse=True, key=lambda k: k["priority"])
    sorted_jobs = [job_mapping["job"] for job_mapping in sorted_job_mappings]

    # Remove all the jobs we're moving to the front of the list
    for job in sorted_jobs:
        jobs.remove(job)

    return sorted_jobs + jobs
def prioritize_zebrafish_jobs(jobs: List) -> List:
    """Moves zebrafish jobs to the beginning of the input list."""
    zebrafish_jobs = []
    for job in jobs:
        try:
            if type(job) == ProcessorJob and not does_processor_job_have_samples(job):
                continue

            # There aren't cross-species jobs, so just checking one sample's organism will be sufficient.
            samples = job.get_samples()

            for sample in samples:
                if sample.organism.name == "DANIO_RERIO":
                    zebrafish_jobs.append(job)
                    break
        except Exception:
            # Prioritization is best-effort: a job we cannot inspect simply
            # keeps its original queue position. Catch Exception (not a bare
            # except) so KeyboardInterrupt/SystemExit still propagate.
            logger.debug("Exception caught while prioritizing zebrafish jobs!", job=job)

    # Remove all the jobs we're moving to the front of the list
    for job in zebrafish_jobs:
        jobs.remove(job)

    return zebrafish_jobs + jobs
def prioritize_jobs_by_accession(jobs: List, accession_list: List[str]) -> List:
    """Moves jobs whose accessions are in accession_list to the beginning of the input list."""
    prioritized_jobs = []
    for job in jobs:
        try:
            if type(job) == ProcessorJob and not does_processor_job_have_samples(job):
                continue

            # All samples in a job correspond to the same experiment, so just check one sample.
            samples = job.get_samples()

            # Iterate through all the samples' experiments until one is
            # found with an accession in `accession_list`.
            is_prioritized_job = False
            for sample in samples:
                if is_prioritized_job:
                    # We found one! So stop looping
                    break

                for experiment in sample.experiments.all():
                    if experiment.accession_code in accession_list:
                        prioritized_jobs.append(job)
                        is_prioritized_job = True
                        break
        except Exception:
            # Prioritization is best-effort; catch Exception (not a bare
            # except) so KeyboardInterrupt/SystemExit still propagate.
            logger.exception("Exception caught while prioritizing jobs by accession!", job=job)

    # Remove all the jobs we're moving to the front of the list
    for job in prioritized_jobs:
        jobs.remove(job)

    return prioritized_jobs + jobs
##
# Downloaders
##
def requeue_downloader_job(last_job: DownloaderJob) -> (bool, str):
    """Queues a new downloader job.

    The new downloader job will have num_retries one greater than
    last_job.num_retries.

    Returns True and the volume index of the downloader job upon successful dispatching,
    False and an empty string otherwise.
    """
    num_retries = last_job.num_retries + 1

    ram_amount = last_job.ram_amount
    # If there's no start time then it's likely that the instance got
    # cycled which means we didn't get OOM-killed, so we don't need to
    # increase the RAM amount.
    if last_job.start_time and last_job.failure_reason is None:
        if ram_amount == 1024:
            ram_amount = 4096
        elif ram_amount == 4096:
            ram_amount = 16384

    original_file = last_job.original_files.first()

    # A downloader job with no original file has nothing to download;
    # mark it dead rather than retrying forever.
    if not original_file:
        last_job.no_retry = True
        last_job.success = False
        last_job.failure_reason = (
            "Foreman told to requeue a DownloaderJob without an OriginalFile - why?!"
        )
        last_job.save()
        logger.info(
            "Foreman told to requeue a DownloaderJob without an OriginalFile - why?!",
            last_job=str(last_job),
        )
        return False, ""

    # If the file was already processed successfully there is no point
    # in downloading it again.
    if not original_file.needs_processing():
        last_job.no_retry = True
        last_job.success = False
        last_job.failure_reason = "Foreman told to redownload job with prior successful processing."
        last_job.save()
        logger.info(
            "Foreman told to redownload job with prior successful processing.",
            last_job=str(last_job),
        )
        return False, ""

    first_sample = original_file.samples.first()

    # This is a magic string that all the dbGaP studies appear to have
    if first_sample and ("in the dbGaP study" in first_sample.title):
        last_job.no_retry = True
        last_job.success = False
        last_job.failure_reason = "Sample is dbGaP access controlled."
        last_job.save()
        logger.info(
            "Avoiding requeuing for DownloaderJob for dbGaP run accession: "
            + str(first_sample.accession_code)
        )
        return False, ""

    new_job = DownloaderJob(
        num_retries=num_retries,
        downloader_task=last_job.downloader_task,
        ram_amount=ram_amount,
        accession_code=last_job.accession_code,
        was_recreated=last_job.was_recreated,
        volume_index=get_emptiest_volume(),
    )
    new_job.save()

    for original_file in last_job.original_files.all():
        DownloaderJobOriginalFileAssociation.objects.get_or_create(
            downloader_job=new_job, original_file=original_file
        )

    logger.debug(
        "Requeuing Downloader Job which had ID %d with a new Downloader Job with ID %d.",
        last_job.id,
        new_job.id,
    )
    try:
        if send_job(Downloaders[last_job.downloader_task], job=new_job, is_dispatch=True):
            last_job.retried = True
            last_job.success = False
            last_job.retried_job = new_job
            last_job.save()
        else:
            # Can't communicate with nomad just now, leave the job for a later loop.
            new_job.delete()
            return False, ""
    except Exception:
        # Catch Exception (not a bare except) so KeyboardInterrupt/SystemExit
        # still propagate to the main loop.
        logger.error(
            "Failed to requeue Downloader Job which had ID %d with a new Downloader Job with ID %d.",
            last_job.id,
            new_job.id,
        )
        # Can't communicate with nomad just now, leave the job for a later loop.
        new_job.delete()
        return False, ""

    return True, new_job.volume_index
def count_downloader_jobs_in_queue(window=datetime.timedelta(minutes=2)) -> int:
    """Counts how many downloader jobs in the Nomad queue do not have status of 'dead'.

    The count is cached in DOWNLOADER_JOBS_IN_QUEUE and refreshed from
    Nomad at most once per `window`.
    """
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)

    global TIME_OF_LAST_DOWNLOADER_JOB_CHECK
    global DOWNLOADER_JOBS_IN_QUEUE
    if timezone.now() - TIME_OF_LAST_DOWNLOADER_JOB_CHECK > window:
        try:
            all_downloader_jobs = nomad_client.jobs.get_jobs(prefix="DOWNLOADER")

            total = 0
            for job in all_downloader_jobs:
                if job["ParameterizedJob"] and job["JobSummary"].get("Children", None):
                    total = total + job["JobSummary"]["Children"]["Pending"]
                    total = total + job["JobSummary"]["Children"]["Running"]

            DOWNLOADER_JOBS_IN_QUEUE = total
        except Exception:
            # Nomad is down, return an impossibly high number to prevent
            # additional queuing from happening. Catch Exception (not a
            # bare except) so KeyboardInterrupt/SystemExit still propagate.
            DOWNLOADER_JOBS_IN_QUEUE = sys.maxsize

        TIME_OF_LAST_DOWNLOADER_JOB_CHECK = timezone.now()

    return DOWNLOADER_JOBS_IN_QUEUE
def get_capacity_for_downloader_jobs() -> int:
    """Returns how many downloader jobs the queue has capacity for."""
    update_volume_work_depth()

    # Sum the spare capacity of every volume that is still below the
    # desired work depth.
    total_capacity = sum(
        DESIRED_WORK_DEPTH - depth
        for depth in VOLUME_WORK_DEPTH.values()
        if depth < DESIRED_WORK_DEPTH
    )

    # Never allow the cluster-wide downloader total past the hard cap,
    # no matter how much per-volume capacity exists.
    jobs_in_queue = count_downloader_jobs_in_queue()
    if jobs_in_queue + total_capacity >= HARD_MAX_DOWNLOADER_JOBS:
        return HARD_MAX_DOWNLOADER_JOBS - jobs_in_queue

    return total_capacity
def handle_downloader_jobs(jobs: List[DownloaderJob]) -> None:
    """For each job in jobs, either retry it or log it.

    No more than queue_capacity jobs will be retried.
    """
    global VOLUME_WORK_DEPTH
    global DOWNLOADER_JOBS_IN_QUEUE

    queue_capacity = get_capacity_for_downloader_jobs()

    jobs_dispatched = 0
    # Plain iteration: the previous enumerate() index was never used.
    for job in jobs:
        if jobs_dispatched >= queue_capacity:
            logger.info(
                "We hit the maximum downloader jobs / capacity ceiling, so we're not handling any more downloader jobs now."
            )
            return

        if job.num_retries < MAX_NUM_RETRIES:
            requeue_success, dispatched_volume = requeue_downloader_job(job)
            if requeue_success:
                jobs_dispatched = jobs_dispatched + 1
                # Keep the cached counters in sync with the dispatch we just did.
                VOLUME_WORK_DEPTH[dispatched_volume] += 1
                DOWNLOADER_JOBS_IN_QUEUE += 1
        else:
            handle_repeated_failure(job)
def retry_failed_downloader_jobs() -> None:
    """Handle downloader jobs that were marked as a failure."""
    failed_jobs = (
        DownloaderJob.failed_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )

    # NOTE: unlike the hung/lost variants, this function never queries
    # Nomad directly, so no Nomad client is constructed here.
    queue_capacity = get_capacity_for_downloader_jobs()

    paginator = Paginator(failed_jobs, PAGE_SIZE, "created_at")
    page = paginator.page()
    page_count = 0

    if queue_capacity <= 0:
        logger.info(
            "Not handling failed (explicitly-marked-as-failure) downloader jobs "
            "because there is no capacity for them."
        )

    while queue_capacity > 0:
        logger.info(
            "Handling page %d of failed (explicitly-marked-as-failure) downloader jobs!", page_count
        )

        handle_downloader_jobs(page.object_list)

        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_downloader_jobs()
        else:
            break
def retry_hung_downloader_jobs() -> None:
    """Retry downloader jobs that were started but never finished."""
    potentially_hung_jobs = (
        DownloaderJob.hung_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )

    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)

    queue_capacity = get_capacity_for_downloader_jobs()

    if queue_capacity <= 0:
        # Fixed copy-pasted message: this function handles hung jobs,
        # not explicitly-failed ones.
        logger.info(
            "Not handling hung (started-but-never-finished) downloader jobs "
            "because there is no capacity for them."
        )

    paginator = Paginator(potentially_hung_jobs, PAGE_SIZE, "created_at")
    page = paginator.page()
    page_count = 0

    while queue_capacity > 0:
        hung_jobs = []
        for job in page.object_list:
            try:
                job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                if job_status != "running":
                    # Make sure it didn't finish since our original query.
                    job.refresh_from_db()
                    if job.end_time is None:
                        hung_jobs.append(job)
            except URLNotFoundNomadException:
                # Nomad no longer knows about the job at all: it's hung.
                hung_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception(
                    "Couldn't query Nomad about Downloader Job.", downloader_job=job.id
                )

        if hung_jobs:
            logger.info(
                "Handling page %d of hung (started-but-never-finished) downloader jobs!",
                page_count,
                jobs_count=len(hung_jobs),
            )
            handle_downloader_jobs(hung_jobs)

        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_downloader_jobs()
        else:
            break
def retry_lost_downloader_jobs() -> None:
    """Retry downloader jobs that went too long without being started.

    Idea: at some point this function could integrate with the spot
    instances to determine if jobs are hanging due to a lack of
    instances. A naive time-based implementation like this could end
    up retrying every single queued job if there were a long period
    during which the price of spot instance is higher than our bid
    price.
    """
    global VOLUME_WORK_DEPTH
    global DOWNLOADER_JOBS_IN_QUEUE

    potentially_lost_jobs = (
        DownloaderJob.lost_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )

    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)

    queue_capacity = get_capacity_for_downloader_jobs()

    if queue_capacity <= 0:
        # Fixed copy-pasted message: this function handles lost jobs,
        # not explicitly-failed ones.
        logger.info(
            "Not handling lost (never-started) downloader jobs "
            "because there is no capacity for them."
        )

    paginator = Paginator(potentially_lost_jobs, PAGE_SIZE, "created_at")
    page = paginator.page()
    page_count = 0

    while queue_capacity > 0:
        lost_jobs = []
        jobs_queued_from_this_page = 0
        for job in page.object_list:
            try:
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                    # If the job is still pending, then it makes sense that it
                    # hasn't started and if it's running then it may not have
                    # been able to mark the job record as started yet.
                    if job_status != "pending" and job_status != "running":
                        logger.debug(
                            (
                                "Determined that a downloader job needs to be requeued because its"
                                " Nomad Job's status is: %s."
                            ),
                            job_status,
                            job_id=job.id,
                        )
                        lost_jobs.append(job)
                elif jobs_queued_from_this_page < queue_capacity:
                    # The job never got put in the Nomad queue, no
                    # need to recreate it, we just gotta queue it up!
                    job.volume_index = get_emptiest_volume()
                    job.save()
                    send_job(Downloaders[job.downloader_task], job=job, is_dispatch=True)
                    jobs_queued_from_this_page += 1
                    VOLUME_WORK_DEPTH[job.volume_index] += 1
            except socket.timeout:
                logger.info("Timeout connecting to Nomad - is Nomad down?", job_id=job.id)
            except URLNotFoundNomadException:
                logger.debug(
                    (
                        "Determined that a downloader job needs to be requeued because "
                        "querying for its Nomad job failed: "
                    ),
                    job_id=job.id,
                )
                lost_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception(
                    "Couldn't query Nomad about Downloader Job.", downloader_job=job.id
                )

        if lost_jobs and get_capacity_for_downloader_jobs() > 0:
            logger.info(
                "Handling page %d of lost (never-started) downloader jobs!",
                page_count,
                len_jobs=len(lost_jobs),
            )
            handle_downloader_jobs(lost_jobs)

        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_downloader_jobs()
        else:
            break
##
# Processors
##
def requeue_processor_job(last_job: ProcessorJob) -> None:
    """Queues a new processor job.

    The new processor job will have num_retries one greater than
    last_job.num_retries.
    """
    num_retries = last_job.num_retries + 1

    # The Salmon pipeline is quite RAM-sensitive.
    # Try it again with an increased RAM amount, if possible.
    new_ram_amount = last_job.ram_amount

    # If there's no start time then it's likely that the instance got
    # cycled which means we didn't get OOM-killed, so we don't need to
    # increase the RAM amount.
    if last_job.start_time:
        # These initial values are set in common/job_lookup.py:determine_ram_amount
        if (
            last_job.pipeline_applied == "SALMON"
            or last_job.pipeline_applied == "TXIMPORT"
            or last_job.pipeline_applied.startswith("TRANSCRIPTOME")
        ):
            # Bump exactly one RAM tier per requeue. The second branch was
            # previously an `if`, which double-bumped 4096 straight to
            # 12288 in a single retry.
            if new_ram_amount == 4096:
                new_ram_amount = 8192
            elif new_ram_amount == 8192:
                new_ram_amount = 12288
            elif new_ram_amount == 12288:
                new_ram_amount = 16384
            elif new_ram_amount == 16384:
                new_ram_amount = 32768
            elif new_ram_amount == 32768:
                new_ram_amount = 65536
        # The AFFY pipeline is somewhat RAM-sensitive.
        # Also NO_OP can fail and be retried, so we want to attempt ramping up ram.
        # Try it again with an increased RAM amount, if possible.
        elif last_job.pipeline_applied == "AFFY_TO_PCL" or last_job.pipeline_applied == "NO_OP":
            if new_ram_amount == 2048:
                new_ram_amount = 4096
            elif new_ram_amount == 4096:
                new_ram_amount = 8192
            elif new_ram_amount == 8192:
                new_ram_amount = 32768
        elif (
            last_job.pipeline_applied == "ILLUMINA_TO_PCL"
            and "non-zero exit status -9" in last_job.failure_reason
        ):
            if new_ram_amount == 2048:
                new_ram_amount = 4096
            elif new_ram_amount == 4096:
                new_ram_amount = 8192

    volume_index = last_job.volume_index
    # Make sure volume_index is set to something, unless it's a
    # smasher job type because the smasher instance doesn't have a
    # volume_index.
    if (not volume_index or volume_index == "-1") and ProcessorPipeline[
        last_job.pipeline_applied
    ] not in SMASHER_JOB_TYPES:
        active_volumes = get_active_volumes()
        if len(active_volumes) < 1 or not settings.RUNNING_IN_CLOUD:
            logger.debug("No active volumes to requeue processor job.", job_id=last_job.id)
            return
        else:
            volume_index = random.choice(list(active_volumes))

    new_job = ProcessorJob(
        num_retries=num_retries,
        pipeline_applied=last_job.pipeline_applied,
        ram_amount=new_ram_amount,
        volume_index=volume_index,
    )
    new_job.save()

    for original_file in last_job.original_files.all():
        ProcessorJobOriginalFileAssociation.objects.get_or_create(
            processor_job=new_job, original_file=original_file
        )

    for dataset in last_job.datasets.all():
        ProcessorJobDatasetAssociation.objects.get_or_create(processor_job=new_job, dataset=dataset)

    try:
        logger.debug(
            "Requeuing Processor Job which had ID %d with a new Processor Job with ID %d.",
            last_job.id,
            new_job.id,
        )
        if send_job(ProcessorPipeline[last_job.pipeline_applied], job=new_job, is_dispatch=True):
            last_job.retried = True
            last_job.success = False
            last_job.retried_job = new_job
            last_job.save()
        else:
            # Can't communicate with nomad just now, leave the job for a later loop.
            new_job.delete()
    except Exception:
        # Catch Exception (not a bare except) so KeyboardInterrupt/SystemExit
        # still propagate to the main loop.
        logger.warn(
            "Failed to requeue Processor Job which had ID %d with a new Processor Job with ID %d.",
            last_job.id,
            new_job.id,
            exc_info=1,
        )
        # Can't communicate with nomad just now, leave the job for a later loop.
        new_job.delete()
def get_capacity_for_processor_jobs(nomad_client) -> int:
    """Returns how many processor jobs the queue has capacity for.

    (Return annotation corrected: this returns an int, not a bool.)
    """
    # Maximum number of total jobs running at a time.
    # We do this now rather than import time for testing purposes.
    MAX_TOTAL_JOBS = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))
    len_all_jobs = len(nomad_client.jobs.get_jobs())
    return MAX_TOTAL_JOBS - len_all_jobs
def handle_processor_jobs(
    jobs: List[ProcessorJob], queue_capacity: int = None, ignore_ceiling=False
) -> None:
    """For each job in jobs, either retry it or log it.

    No more than queue_capacity jobs will be retried unless
    ignore_ceiling is True.
    """
    # Maximum number of total jobs running at a time.
    # We do this now rather than import time for testing purposes.
    if queue_capacity is None:
        queue_capacity = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))

    # We want zebrafish data first, then hgu133plus2, then data
    # related to pediatric cancer, then to finish salmon experiments
    # that are close to completion.
    # Each function moves the jobs it prioritizes to the front of the
    # list, so apply them in backwards order.
    # jobs = prioritize_salmon_jobs(jobs)
    # jobs = prioritize_jobs_by_accession(jobs, PEDIATRIC_ACCESSION_LIST)
    # jobs = prioritize_jobs_by_accession(jobs, HGU133PLUS2_ACCESSION_LIST)
    # jobs = prioritize_zebrafish_jobs(jobs)

    jobs_dispatched = 0
    # Plain iteration: the previous enumerate() index was never used.
    for job in jobs:
        if not ignore_ceiling and jobs_dispatched >= queue_capacity:
            logger.info(
                "We hit the maximum total jobs ceiling, so we're not handling any more processor jobs now."
            )
            return

        if job.num_retries < MAX_NUM_RETRIES:
            requeue_processor_job(job)
            jobs_dispatched = jobs_dispatched + 1
        else:
            handle_repeated_failure(job)
def retry_failed_processor_jobs() -> None:
    """Handle processor jobs that were marked as a failure.

    Ignores Janitor jobs since they are queued every half hour anyway."""
    try:
        active_volumes = get_active_volumes()
    except Exception:
        # If we cannot reach Nomad now then we can wait until a later loop.
        # Returning (instead of `pass`) also avoids a NameError from the
        # `active_volumes` reference below when it was never assigned.
        return

    failed_jobs = (
        ProcessorJob.failed_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .filter(Q(volume_index__isnull=True) | Q(volume_index__in=active_volumes))
        .exclude(pipeline_applied="JANITOR")
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )

    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_processor_jobs(nomad_client)

    paginator = Paginator(failed_jobs, 200, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        logger.info(
            "Handling page %d of failed (explicitly-marked-as-failure) processor jobs!", page_count
        )
        handle_processor_jobs(page.object_list, queue_capacity)

        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_processor_jobs(nomad_client)
        else:
            break
def retry_hung_processor_jobs() -> None:
    """Retry processor jobs that were started but never finished.

    Ignores Janitor jobs since they are queued every half hour anyway."""
    try:
        active_volumes = get_active_volumes()
    except Exception:
        # If we cannot reach Nomad now then we can wait until a later loop.
        # Returning (instead of `pass`) also avoids a NameError from the
        # `active_volumes` reference below when it was never assigned.
        return

    potentially_hung_jobs = (
        ProcessorJob.hung_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .filter(Q(volume_index__isnull=True) | Q(volume_index__in=active_volumes))
        .exclude(pipeline_applied="JANITOR")
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )

    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_processor_jobs(nomad_client)

    paginator = Paginator(potentially_hung_jobs, 200, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        hung_jobs = []
        for job in page.object_list:
            try:
                job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                if job_status != "running":
                    # Make sure it didn't finish since our original query.
                    job.refresh_from_db()
                    if job.end_time is None:
                        hung_jobs.append(job)
            except URLNotFoundNomadException:
                hung_jobs.append(job)
            except TypeError:
                # Almost certainly a python-nomad issue:
                # File "/usr/local/lib/python3.5/dist-packages/nomad/api/job.py", line 63, in get_job
                #   return self.request(id, method="get").json()
                # File "/usr/local/lib/python3.5/dist-packages/nomad/api/base.py", line 74, in request
                #   endpoint = self._endpoint_builder(self.ENDPOINT, *args)
                # File "/usr/local/lib/python3.5/dist-packages/nomad/api/base.py", line 28, in _endpoint_builder
                #   u = "/".join(args)
                # TypeError: sequence item 1: expected str instance, NoneType found
                logger.info("Couldn't query Nomad about Processor Job.", processor_job=job.id)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception("Couldn't query Nomad about Processor Job.", processor_job=job.id)

        if hung_jobs:
            logger.info(
                "Handling hung page %d of (started-but-never-finished) processor jobs!",
                page_count,
                len_jobs=len(hung_jobs),
            )
            handle_processor_jobs(hung_jobs, queue_capacity)

        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_processor_jobs(nomad_client)
        else:
            break
def retry_lost_processor_jobs() -> None:
    """Retry processor jobs which never even got started for too long.

    Ignores Janitor jobs since they are queued every half hour anyway."""
    try:
        active_volumes = get_active_volumes()
    except Exception:
        # If we cannot reach Nomad now then we can wait until a later loop.
        # Returning (instead of `pass`) also avoids a NameError from the
        # `active_volumes` reference below when it was never assigned.
        return

    potentially_lost_jobs = (
        ProcessorJob.lost_objects.filter(created_at__gt=JOB_CREATED_AT_CUTOFF)
        .filter(Q(volume_index__isnull=True) | Q(volume_index__in=active_volumes))
        .exclude(pipeline_applied="JANITOR")
        .order_by("created_at")
        .prefetch_related("original_files__samples")
    )

    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=5)
    queue_capacity = get_capacity_for_processor_jobs(nomad_client)

    paginator = Paginator(potentially_lost_jobs, 200, "created_at")
    page = paginator.page()
    page_count = 0
    while queue_capacity > 0:
        lost_jobs = []
        for job in page.object_list:
            try:
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                    # If the job is still pending, then it makes sense that it
                    # hasn't started and if it's running then it may not have
                    # been able to mark the job record as started yet.
                    if job_status != "pending" and job_status != "running":
                        logger.debug(
                            (
                                "Determined that a processor job needs to be requeued because its"
                                " Nomad Job's status is: %s."
                            ),
                            job_status,
                            job_id=job.id,
                        )
                        lost_jobs.append(job)
                else:
                    # If there is no nomad_job_id field set, we could be
                    # in the small window where the job was created but
                    # hasn't yet gotten a chance to be queued.
                    # If this job really should be restarted we'll get it in the next loop.
                    if timezone.now() - job.created_at > MIN_LOOP_TIME:
                        lost_jobs.append(job)
            except URLNotFoundNomadException:
                logger.debug(
                    (
                        "Determined that a processor job needs to be requeued because "
                        "querying for its Nomad job failed: "
                    ),
                    job_id=job.id,
                )
                lost_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                raise
            except Exception:
                logger.exception("Couldn't query Nomad about Processor Job.", processor_job=job.id)

        if lost_jobs:
            logger.info(
                "Handling lost page %d of (never-started) processor jobs!",
                page_count,
                len_jobs=len(lost_jobs),
            )
            handle_processor_jobs(lost_jobs, queue_capacity)

        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_processor_jobs(nomad_client)
        else:
            break
##
# Surveyors
##
def requeue_survey_job(last_job: SurveyJob) -> bool:
    """Queue a new survey job to retry `last_job`.

    The new survey job will have num_retries one greater than
    last_job.num_retries and copies over all of last_job's key/values.

    Returns True in every case; when Nomad cannot be reached the new
    job is deleted so the retry is left for a later foreman loop.
    """
    num_retries = last_job.num_retries + 1
    new_job = SurveyJob(num_retries=num_retries, source_type=last_job.source_type)
    # Escalate RAM for the first few retries.
    # NOTE(review): retries >= 4 falling back to 256MB looks intentional
    # (repeated failures are presumably not memory-related) -- confirm.
    if new_job.num_retries == 1:
        new_job.ram_amount = 4096
    elif new_job.num_retries in [2, 3]:
        new_job.ram_amount = 16384
    else:
        new_job.ram_amount = 256
    new_job.save()
    # Carry the job's parameters (e.g. the accession to survey) over to the retry.
    for keyvalue in SurveyJobKeyValue.objects.filter(survey_job=last_job):
        SurveyJobKeyValue.objects.get_or_create(
            survey_job=new_job, key=keyvalue.key, value=keyvalue.value,
        )
    logger.debug(
        "Requeuing SurveyJob which had ID %d with a new SurveyJob with ID %d.",
        last_job.id,
        new_job.id,
    )
    try:
        if send_job(SurveyJobTypes.SURVEYOR, job=new_job, is_dispatch=True):
            last_job.retried = True
            last_job.success = False
            last_job.retried_job = new_job
            last_job.save()
        else:
            # Can't communicate with nomad just now, leave the job for a later loop.
            new_job.delete()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        logger.error(
            "Failed to requeue Survey Job which had ID %d with a new Survey Job with ID %d.",
            last_job.id,
            new_job.id,
        )
        # Can't communicate with nomad just now, leave the job for a later loop.
        new_job.delete()
    return True
def get_capacity_for_survey_jobs(nomad_client) -> int:
    """Return how many survey jobs the Nomad queue has capacity for.

    Computed as MAX_TOTAL_JOBS minus the number of jobs Nomad currently
    reports; may be negative when the queue is over-subscribed.
    (The return annotation previously said `bool`, but this has always
    returned an integer count that callers compare with `> 0`.)
    """
    # Maximum number of total jobs running at a time.
    # We read the env var now rather than at import time for testing purposes.
    MAX_TOTAL_JOBS = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))
    len_all_jobs = len(nomad_client.jobs.get_jobs())
    return MAX_TOTAL_JOBS - len_all_jobs
def handle_survey_jobs(jobs: List[SurveyJob], queue_capacity: int = None) -> None:
    """For each job in jobs, either retry it or log it.

    No more than queue_capacity jobs will be retried; jobs that have
    already hit MAX_NUM_RETRIES are handed to handle_repeated_failure.
    """
    # Maximum number of total jobs running at a time.
    # We do this now rather than import time for testing purposes.
    if queue_capacity is None:
        queue_capacity = int(get_env_variable_gracefully("MAX_TOTAL_JOBS", DEFAULT_MAX_JOBS))
    jobs_dispatched = 0
    # (Previously used `enumerate` with an unused `count` variable.)
    for job in jobs:
        if jobs_dispatched >= queue_capacity:
            logger.info(
                "We hit the maximum total jobs ceiling, so we're not handling any more survey jobs now."
            )
            return
        if job.num_retries < MAX_NUM_RETRIES:
            requeue_survey_job(job)
            jobs_dispatched = jobs_dispatched + 1
        else:
            handle_repeated_failure(job)
def retry_failed_survey_jobs() -> None:
    """Requeue survey jobs that were explicitly marked as failed."""
    failed_jobs = SurveyJob.failed_objects.filter(
        created_at__gt=JOB_CREATED_AT_CUTOFF
    ).order_by("pk")

    nomad_client = Nomad(
        get_env_variable("NOMAD_HOST"),
        port=int(get_env_variable("NOMAD_PORT", "4646")),
        timeout=30,
    )
    queue_capacity = get_capacity_for_survey_jobs(nomad_client)

    paginator = Paginator(failed_jobs, 200)
    page = paginator.page()
    page_count = 0
    # Work through one page at a time, re-checking queue capacity between
    # pages, and stop as soon as the queue is full or pages run out.
    while queue_capacity > 0:
        logger.info(
            "Handling page %d of failed (explicitly-marked-as-failure) survey jobs!", page_count
        )
        handle_survey_jobs(page.object_list, queue_capacity)
        if not page.has_next():
            break
        page = paginator.page(page.next_page_number())
        page_count += 1
        queue_capacity = get_capacity_for_survey_jobs(nomad_client)
def retry_hung_survey_jobs() -> None:
    """Retry survey jobs that were started but never finished.

    Pages through candidates, asks Nomad for each job's status, and
    requeues those whose Nomad job is no longer running but whose
    database record never got an end_time.
    """
    potentially_hung_jobs = SurveyJob.hung_objects.filter(
        created_at__gt=JOB_CREATED_AT_CUTOFF
    ).order_by("pk")
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_survey_jobs(nomad_client)
    paginator = Paginator(potentially_hung_jobs, 200)
    page = paginator.page()
    page_count = 0
    # Stop as soon as the queue fills up or we run out of pages.
    while queue_capacity > 0:
        hung_jobs = []
        for job in page.object_list:
            try:
                # Surveyor jobs didn't always have nomad_job_ids. If they
                # don't have one then by this point they've definitely died.
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                else:
                    job_status = "absent"
                if job_status != "running":
                    # Make sure it didn't finish since our original query.
                    job.refresh_from_db()
                    if job.end_time is None:
                        hung_jobs.append(job)
            except URLNotFoundNomadException:
                # Nomad no longer knows about the job, so it's dead.
                hung_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                # Nomad itself is misbehaving; abort and let a later loop retry.
                raise
            except Exception:
                logger.exception("Couldn't query Nomad about SurveyJob Job.", survey_job=job.id)
        if hung_jobs:
            logger.info(
                "Handling page %d of hung (started-but-never-finished) survey jobs!",
                page_count,
                len_jobs=len(hung_jobs),
            )
            handle_survey_jobs(hung_jobs, queue_capacity)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_survey_jobs(nomad_client)
        else:
            break
def retry_lost_survey_jobs() -> None:
    """Retry survey jobs which never even got started for too long.

    A job is "lost" when its Nomad job is neither pending nor running
    (or Nomad no longer knows about it) yet it never recorded a start.
    """
    potentially_lost_jobs = SurveyJob.lost_objects.filter(
        created_at__gt=JOB_CREATED_AT_CUTOFF
    ).order_by("pk")
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
    queue_capacity = get_capacity_for_survey_jobs(nomad_client)
    paginator = Paginator(potentially_lost_jobs, 200)
    page = paginator.page()
    page_count = 0
    # Stop as soon as the queue fills up or we run out of pages.
    while queue_capacity > 0:
        lost_jobs = []
        for job in page.object_list:
            try:
                # Surveyor jobs didn't always have nomad_job_ids. If they
                # don't have one then by this point they've definitely died.
                if job.nomad_job_id:
                    job_status = nomad_client.job.get_job(job.nomad_job_id)["Status"]
                else:
                    job_status = "absent"
                # If the job is still pending, then it makes sense that it
                # hasn't started and if it's running then it may not have
                # been able to mark the job record as started yet.
                if job_status != "pending" and job_status != "running":
                    logger.debug(
                        (
                            "Determined that a survey job needs to be requeued because its"
                            " Nomad Job's status is: %s."
                        ),
                        job_status,
                        job_id=job.id,
                    )
                    lost_jobs.append(job)
            except URLNotFoundNomadException:
                logger.debug(
                    (
                        "Determined that a survey job needs to be requeued because "
                        "querying for its Nomad job failed."
                    ),
                    job_id=job.id,
                )
                lost_jobs.append(job)
            except nomad.api.exceptions.BaseNomadException:
                # Nomad itself is misbehaving; abort and let a later loop retry.
                raise
            except Exception:
                # NOTE(review): the message says "Processor Job" but this is the
                # survey loop -- looks like a copy/paste slip; fix separately.
                logger.exception("Couldn't query Nomad about Processor Job.", survey_job=job.id)
        if lost_jobs:
            logger.info(
                "Handling page %d of lost (never-started) survey jobs!",
                page_count,
                len_jobs=len(lost_jobs),
            )
            handle_survey_jobs(lost_jobs, queue_capacity)
        if page.has_next():
            page = paginator.page(page.next_page_number())
            page_count = page_count + 1
            queue_capacity = get_capacity_for_survey_jobs(nomad_client)
        else:
            break
##
# Janitor
##
def send_janitor_jobs():
    """Dispatch a JANITOR processor job for each active volume in the cluster.

    One extra janitor with volume_index=None is dispatched to clean up
    the smasher, which isn't tied to a volume.
    """
    try:
        active_volumes = get_active_volumes()
    except Exception:
        # If we cannot reach Nomad now then we can wait until a later loop.
        # (This previously fell through with `pass`, which raised a NameError
        # below because `active_volumes` was never bound.)
        return
    # Clean up the smasher:
    active_volumes.add(None)
    for volume_index in active_volumes:
        new_job = ProcessorJob(
            num_retries=0, pipeline_applied="JANITOR", ram_amount=2048, volume_index=volume_index
        )
        new_job.save()
        logger.info("Sending Janitor with index: ", job_id=new_job.id, index=volume_index)
        try:
            send_job(ProcessorPipeline["JANITOR"], job=new_job, is_dispatch=True)
        except Exception:
            # If we can't dispatch this job, something else has gone wrong;
            # keep trying the janitors for the remaining volumes.
            continue
##
# Handling of node cycling
##
def cleanup_the_queue():
    """This cleans up any jobs which cannot currently be queued.

    We often have more volumes than instances because we have enough
    volumes for the scenario where the entire cluster is using the
    smallest instance type, however that doesn't happen very
    often. Therefore it's possible for some volumes to not be mounted,
    which means that jobs which are constrained to run on instances
    with those volumes cannot be placed and just clog up the queue.

    Therefore we clear out jobs of that type every once in a while so
    our queue is dedicated to jobs that can actually be placed.
    """
    logger.info("Removing all jobs from Nomad queue whose volumes are not mounted.")
    DOWNLOADER = "DOWNLOADER"
    # Smasher and QN Reference jobs aren't tied to a specific EBS volume.
    indexed_job_types = [
        pipeline.value for pipeline in ProcessorPipeline if pipeline not in SMASHER_JOB_TYPES
    ]
    # Special case for downloader jobs because they only have one
    # nomad job type for all downloader tasks.
    indexed_job_types.append(DOWNLOADER)
    nomad_host = get_env_variable("NOMAD_HOST")
    nomad_port = get_env_variable("NOMAD_PORT", "4646")
    nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
    try:
        active_volumes = get_active_volumes()
        jobs = nomad_client.jobs.get_jobs()
    except Exception:
        # Narrowed from a bare `except:`.
        # If we cannot reach Nomad now then we can wait until a later loop.
        logger.warn("Couldn't query Nomad about current jobs.", exc_info=1)
        return
    logger.info(
        (
            "These are the currently active volumes. Jobs for "
            "other volumes will now be removed from the Nomad queue."
        ),
        active_volumes=active_volumes,
    )
    num_jobs_killed = 0
    for job in jobs:
        # Skip over the Parameterized Jobs because we need those to
        # always be running.
        if "ParameterizedJob" not in job or job["ParameterizedJob"]:
            continue
        # We're only concerned with jobs that have to be tied to a volume index.
        if "ParentID" not in job:
            continue
        # Ensure the job is one of the indexed_job_types.
        job_type = None
        for pipeline in indexed_job_types:
            if job["ParentID"].startswith(pipeline):
                job_type = pipeline
                break
        if not job_type:
            continue
        # If this job has an index, then its ParentID will
        # have the pattern of <job-type>_<index>_<RAM-amount>
        # and we want to check the value of <index>:
        split_parent_id = job["ParentID"].split("_")
        if len(split_parent_id) < 2:
            continue
        else:
            index = split_parent_id[-2]
        if index not in active_volumes:
            # The index for this job isn't currently mounted, kill the job.
            # `num_retries` will be decremented when the job receives the SIGKILL.
            try:
                nomad_client.job.deregister_job(job["ID"], purge=True)
                logger.info(
                    "Foreman Killed nomad job because it had a volume that was not active",
                    nomad_job_id=job["ID"],
                    job_type=job_type,
                )
                num_jobs_killed += 1
            except Exception:
                # Narrowed from a bare `except:`.
                logger.exception(
                    "Could not remove Nomad job from the Nomad queue.",
                    nomad_job_id=job["ID"],
                    job_type=job_type,
                )
                # If we can't do this for some reason, we'll get it next loop.
    logger.info("Removed %d jobs from the Nomad queue.", num_jobs_killed)
def clean_database():
    """Hide database objects that may have appeared through race, OOM, bugs, etc.

    Marks smashable ComputedFiles that were never synced to S3 as
    non-public. See: https://github.com/AlexsLemonade/refinebio/issues/1183
    """
    unsynced_files = ComputedFile.objects.filter(
        s3_bucket=None, s3_key=None, is_smashable=True
    )
    logger.info("Cleaning unsynced files!", num_to_clean=unsynced_files.count())
    # Save one-by-one rather than in bulk so the side effects of save()
    # still run for each file.
    for unsynced_file in unsynced_files:
        unsynced_file.is_public = False
        unsynced_file.save()
    logger.info("Cleaned files!")
##
# Main loop
##
def monitor_jobs():
    """Main Foreman thread that helps manage the Nomad job queue.

    Will find jobs that failed, hung, or got lost and requeue them.
    Also will queue up Janitor jobs regularly to free up disk space.
    Also cleans jobs out of the Nomad queue which cannot be queued
    because the volume containing the job's data isn't mounted.

    It does so on a loop forever that won't spin faster than
    MIN_LOOP_TIME, but it may spin slower than that.
    """
    last_janitorial_time = timezone.now()
    last_dbclean_time = timezone.now()
    while True:
        # Perform two heartbeats, one for the logs and one for Monit:
        logger.info("The Foreman's heart is beating, but he does not feel.")
        # Write the health file for Monit to check
        now_secs = int(time.time())
        with open("/tmp/foreman_last_time", "w") as timefile:
            timefile.write(str(now_secs))
        start_time = timezone.now()
        # Requeue jobs of each failure class for each job type.
        # The order of processor -> downloader -> surveyor is intentional.
        # Processors go first so we process data sitting on disk.
        # Downloaders go next so we actually queue up the jobs in the database.
        # Surveyors go last so we don't end up with tons and tons of unqueued jobs.
        requeuing_functions_in_order = [
            retry_failed_processor_jobs,
            retry_hung_processor_jobs,
            retry_lost_processor_jobs,
            retry_failed_downloader_jobs,
            retry_hung_downloader_jobs,
            retry_lost_downloader_jobs,
            retry_failed_survey_jobs,
            retry_hung_survey_jobs,
            retry_lost_survey_jobs,
        ]
        for function in requeuing_functions_in_order:
            try:
                function()
            except Exception:
                # One failing requeue class shouldn't stop the others.
                logger.error("Caught exception in %s: ", function.__name__)
                traceback.print_exc(chain=False)
        if settings.RUNNING_IN_CLOUD:
            # Janitors and queue cleanup run on their own (longer) cadence.
            if timezone.now() - last_janitorial_time > JANITOR_DISPATCH_TIME:
                send_janitor_jobs()
                cleanup_the_queue()
                last_janitorial_time = timezone.now()
            if timezone.now() - last_dbclean_time > DBCLEAN_TIME:
                clean_database()
                last_dbclean_time = timezone.now()
        # Sleep off whatever time remains so the loop doesn't spin
        # faster than MIN_LOOP_TIME.
        loop_time = timezone.now() - start_time
        if loop_time < MIN_LOOP_TIME:
            remaining_time = MIN_LOOP_TIME - loop_time
            if remaining_time.seconds > 0:
                time.sleep(remaining_time.seconds)
<|code_end|>
foreman/data_refinery_foreman/surveyor/geo.py
<|code_start|>import shutil
from re import sub
from typing import Dict, List
import dateutil.parser
import GEOparse
from data_refinery_common.job_lookup import Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
Experiment,
ExperimentAnnotation,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
OriginalFile,
OriginalFileSampleAssociation,
Sample,
SampleAnnotation,
SurveyJobKeyValue,
)
from data_refinery_common.utils import (
FileUtils,
get_normalized_platform,
get_readable_affymetrix_names,
get_supported_microarray_platforms,
get_supported_rnaseq_platforms,
)
from data_refinery_foreman.surveyor import harmony, utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor
logger = get_and_configure_logger(__name__)
GEOparse.logger.set_verbosity("WARN")
UNKNOWN = "UNKNOWN"
class GeoSurveyor(ExternalSourceSurveyor):
    """Surveys NCBI GEO for data.

    Implements the ExternalSourceSurveyor interface.
    """

    def source_type(self):
        """Return the Downloaders enum value this surveyor feeds."""
        return Downloaders.GEO.value

    def get_temp_path(self):
        """Return a scratch directory unique to this survey job."""
        return "/tmp/" + str(self.survey_job.id) + "/"

    def set_platform_properties(
        self, sample_object: Sample, sample_metadata: Dict, gse: GEOparse.GSM
    ) -> Sample:
        """Sets platform-related properties on `sample_object`.

        Uses metadata from `gse` to populate platform_name,
        platform_accession_code, and technology on `sample_object`.
        Returns the (mutated) sample_object for convenience.
        """
        # Determine platform information
        external_accession = get_normalized_platform(gse.metadata.get("platform_id", [UNKNOWN])[0])
        if external_accession == UNKNOWN:
            sample_object.platform_accession_code = UNKNOWN
            sample_object.platform_name = UNKNOWN
            sample_object.manufacturer = UNKNOWN
            # If this sample is Affy, we potentially can extract the
            # platform information from the .CEL file. If it's not we
            # can't do anything. Therefore assume the technology is
            # microarray when we have no platform information.
            sample_object.technology = "MICROARRAY"
            return sample_object
        platform_accession_code = UNKNOWN
        # Fetch the platform (GPL) record so we can read its title.
        gpl = GEOparse.get_GEO(
            external_accession, destdir=self.get_temp_path(), how="brief", silent=True
        )
        platform_title = gpl.metadata.get("title", [UNKNOWN])[0]
        # Check if this is a supported microarray platform.
        for platform in get_supported_microarray_platforms():
            if platform["external_accession"] == external_accession:
                platform_accession_code = platform["platform_accession"]
        if platform_accession_code != UNKNOWN:
            # It's a supported microarray platform.
            # We are using the brain array package as the platform accession code,
            # so, for instance, GPL3213 becomes 'chicken'.
            sample_object.platform_accession_code = platform_accession_code
            sample_object.technology = "MICROARRAY"
            try:
                # Related: https://github.com/AlexsLemonade/refinebio/issues/354
                # If it's Affy we can get a readable name:
                sample_object.platform_name = get_readable_affymetrix_names()[
                    platform_accession_code
                ]
                sample_object.manufacturer = "AFFYMETRIX"
                # Sometimes Affymetrix samples have weird channel
                # protocol metadata, so if we find that it's
                # Affymetrix return it now. Example: GSE113945
                return sample_object
            except KeyError:
                # Otherwise we'll use what we've got.
                sample_object.platform_name = platform_title
            # Determine manufacturer from the platform name.
            platform = sample_object.pretty_platform.upper()
            if "AGILENT" in platform:
                sample_object.manufacturer = "AGILENT"
            elif "ILLUMINA" in platform or "NEXTSEQ" in platform:
                sample_object.manufacturer = "ILLUMINA"
            elif "AFFYMETRIX" in platform:
                sample_object.manufacturer = "AFFYMETRIX"
            else:
                sample_object.manufacturer = UNKNOWN
            return sample_object
        # Check to see if this is a supported RNASeq technology:
        # GEO RNASeq platform titles often have organisms appended to
        # an otherwise recognizable platform. The list of supported
        # RNASeq platforms isn't long, so see if any of them are
        # contained within what GEO gave us.
        # Example: GSE69572 has a platform title of:
        # 'Illumina Genome Analyzer IIx (Glycine max)'
        # Which should really just be 'Illumina Genome Analyzer IIx'
        # because RNASeq platforms are organism agnostic. However,
        # the platforms 'Illumina Genome Analyzer' and 'Illumina
        # Genome Analyzer II' would also be matched, so make sure that
        # the longest platform names are tested first:
        sorted_platform_list = get_supported_rnaseq_platforms().copy()
        sorted_platform_list.sort(key=len, reverse=True)
        for platform in sorted_platform_list:
            if platform.upper() in platform_title.upper():
                sample_object.technology = "RNA-SEQ"
                sample_object.platform_name = platform
                # We just use RNASeq platform titles as accessions
                sample_object.platform_accession_code = platform
                if "ILLUMINA" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "ILLUMINA"
                elif "NEXTSEQ" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "NEXTSEQ"
                elif "ION TORRENT" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "ION_TORRENT"
                else:
                    sample_object.manufacturer = UNKNOWN
                return sample_object
        # If we've made it this far, we don't know what this platform
        # is, therefore we can't know what its technology is. What we
        # do know is what GEO said was it's platform's accession and
        # title are, and that it's unsupported.
        sample_object.platform_name = platform_title
        sample_object.platform_accession_code = external_accession
        sample_object.technology = UNKNOWN
        sample_object.manufacturer = UNKNOWN
        return sample_object

    def get_miniml_url(self, experiment_accession_code):
        """ Build the URL for the MINiML files for this accession code.

        ex:
        'GSE68061' -> 'ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE68nnn/GSE68061/miniml/GSE68061_family.xml.tgz'
        """
        geo = experiment_accession_code.upper()
        geotype = geo[:3]
        # GEO shards series dirs by replacing the last 1-3 digits with "nnn".
        range_subdir = sub(r"\d{1,3}$", "nnn", geo)
        min_url_template = (
            "ftp://ftp.ncbi.nlm.nih.gov/geo/" "series/{range_subdir}/{record}/miniml/{record_file}"
        )
        min_url = min_url_template.format(
            range_subdir=range_subdir, record=geo, record_file="%s_family.xml.tgz" % geo
        )
        return min_url

    @staticmethod
    def get_sample_protocol_info(sample_metadata, sample_accession_code):
        """Collect the protocol-related fields from GEO sample metadata
        into a dict, keyed by human-readable names, plus a Reference URL.
        """
        protocol_info = dict()
        if "extract_protocol_ch1" in sample_metadata:
            protocol_info["Extraction protocol"] = sample_metadata["extract_protocol_ch1"]
        if "label_protocol_ch1" in sample_metadata:
            protocol_info["Label protocol"] = sample_metadata["label_protocol_ch1"]
        if "hyb_protocol" in sample_metadata:
            protocol_info["Hybridization protocol"] = sample_metadata["hyb_protocol"]
        if "scan_protocol" in sample_metadata:
            protocol_info["Scan protocol"] = sample_metadata["scan_protocol"]
        if "data_processing" in sample_metadata:
            protocol_info["Data processing"] = sample_metadata["data_processing"]
        protocol_info["Reference"] = (
            "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + sample_accession_code
        )
        return protocol_info

    @staticmethod
    def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):
        """Applies the harmonized metadata to `sample` as attributes."""
        for key, value in harmonized_metadata.items():
            setattr(sample, key, value)

    @staticmethod
    def _apply_metadata_to_experiment(experiment: Experiment, gse):
        """ Gets the metadata out of gse and applies it to the experiment"""
        experiment.source_url = (
            "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + experiment.accession_code
        )
        experiment.source_database = "GEO"
        experiment.title = gse.metadata.get("title", [""])[0]
        experiment.description = gse.metadata.get("summary", [""])[0]
        # Source doesn't provide time information, assume midnight.
        submission_date = gse.metadata["submission_date"][0] + " 00:00:00 UTC"
        experiment.source_first_published = dateutil.parser.parse(submission_date)
        last_updated_date = gse.metadata["last_update_date"][0] + " 00:00:00 UTC"
        experiment.source_last_updated = dateutil.parser.parse(last_updated_date)
        unique_institutions = list(set(gse.metadata["contact_institute"]))
        experiment.submitter_institution = ", ".join(unique_institutions)
        experiment.pubmed_id = gse.metadata.get("pubmed_id", [""])[0]
        # Scrape publication title and authorship from Pubmed
        if experiment.pubmed_id:
            pubmed_metadata = utils.get_title_and_authors_for_pubmed_id(experiment.pubmed_id)
            experiment.publication_title = pubmed_metadata[0]
            experiment.publication_authors = pubmed_metadata[1]

    def create_experiment_and_samples_from_api(
        self, experiment_accession_code
    ) -> (Experiment, List[Sample]):
        """ The main surveyor - find the Experiment and Samples from NCBI GEO.

        Uses the GEOParse library, for which docs can be found here: https://geoparse.readthedocs.io/en/latest/usage.html#working-with-geo-objects
        """
        # Cleaning up is tracked here: https://github.com/guma44/GEOparse/issues/41
        gse = GEOparse.get_GEO(
            experiment_accession_code, destdir=self.get_temp_path(), how="brief", silent=True
        )
        preprocessed_samples = harmony.preprocess_geo(gse.gsms.items())
        harmonized_samples = harmony.harmonize(preprocessed_samples)
        # Create the experiment object
        try:
            experiment_object = Experiment.objects.get(accession_code=experiment_accession_code)
            logger.debug(
                "Experiment %s already exists, skipping object creation.",
                experiment_accession_code,
                survey_job=self.survey_job.id,
            )
        except Experiment.DoesNotExist:
            experiment_object = Experiment()
            experiment_object.accession_code = experiment_accession_code
            GeoSurveyor._apply_metadata_to_experiment(experiment_object, gse)
            experiment_object.save()
            experiment_annotation = ExperimentAnnotation()
            experiment_annotation.data = gse.metadata
            experiment_annotation.experiment = experiment_object
            experiment_annotation.is_ccdl = False
            experiment_annotation.save()
        # Okay, here's the situation!
        # Sometimes, samples have a direct single representation for themselves.
        # Othertimes, there is a single file with references to every sample in it.
        created_samples = []
        for sample_accession_code, sample in gse.gsms.items():
            try:
                sample_object = Sample.objects.get(accession_code=sample_accession_code)
                logger.debug(
                    "Sample %s from experiment %s already exists, skipping object creation.",
                    sample_accession_code,
                    experiment_object.accession_code,
                    survey_job=self.survey_job.id,
                )
                # Associate it with the experiment, but since it
                # already exists it already has original files
                # associated with it and it's already been downloaded,
                # so don't add it to created_samples.
                ExperimentSampleAssociation.objects.get_or_create(
                    experiment=experiment_object, sample=sample_object
                )
                ExperimentOrganismAssociation.objects.get_or_create(
                    experiment=experiment_object, organism=sample_object.organism
                )
            except Sample.DoesNotExist:
                organism = Organism.get_object_for_name(sample.metadata["organism_ch1"][0].upper())
                sample_object = Sample()
                sample_object.source_database = "GEO"
                sample_object.accession_code = sample_accession_code
                sample_object.organism = organism
                # If data processing step, it isn't raw.
                sample_object.has_raw = not sample.metadata.get("data_processing", None)
                ExperimentOrganismAssociation.objects.get_or_create(
                    experiment=experiment_object, organism=organism
                )
                sample_object.title = sample.metadata["title"][0]
                self.set_platform_properties(sample_object, sample.metadata, gse)
                GeoSurveyor._apply_harmonized_metadata_to_sample(
                    sample_object, harmonized_samples[sample_object.title]
                )
                # Sample-level protocol_info
                sample_object.protocol_info = self.get_sample_protocol_info(
                    sample.metadata, sample_accession_code
                )
                sample_object.save()
                logger.debug("Created Sample: " + str(sample_object))
                metadata = sample.metadata
                metadata["geo_columns"] = list(sample.columns.index)
                sample_annotation = SampleAnnotation()
                sample_annotation.sample = sample_object
                sample_annotation.data = metadata
                sample_annotation.is_ccdl = False
                sample_annotation.save()
                sample_supplements = sample.metadata.get("supplementary_file", [])
                for supplementary_file_url in sample_supplements:
                    # Why do they give us this?
                    if supplementary_file_url == "NONE":
                        break
                    # We never want these file types!
                    if "idat.gz" in supplementary_file_url.lower():
                        continue
                    if "chp.gz" in supplementary_file_url.lower():
                        continue
                    if "ndf.gz" in supplementary_file_url.lower():
                        continue
                    if "pos.gz" in supplementary_file_url.lower():
                        continue
                    if "pair.gz" in supplementary_file_url.lower():
                        continue
                    if "gff.gz" in supplementary_file_url.lower():
                        continue
                    # Sometimes, we are lied to about the data processing step.
                    lower_file_url = supplementary_file_url.lower()
                    if (
                        ".cel" in lower_file_url
                        or ("_non_normalized.txt" in lower_file_url)
                        or ("_non-normalized.txt" in lower_file_url)
                        or ("-non-normalized.txt" in lower_file_url)
                        or ("-non_normalized.txt" in lower_file_url)
                    ):
                        sample_object.has_raw = True
                        sample_object.save()
                    # filename and source_filename are the same for these
                    filename = FileUtils.get_filename(supplementary_file_url)
                    original_file = OriginalFile.objects.get_or_create(
                        source_url=supplementary_file_url,
                        filename=filename,
                        source_filename=filename,
                        has_raw=sample_object.has_raw,
                        is_archive=FileUtils.is_archive(filename),
                    )[0]
                    logger.debug("Created OriginalFile: " + str(original_file))
                    # NOTE(review): the assigned variable below is unused;
                    # only the get_or_create side effect matters.
                    original_file_sample_association = OriginalFileSampleAssociation.objects.get_or_create(
                        original_file=original_file, sample=sample_object
                    )
                    if original_file.is_affy_data():
                        # Only Affymetrix Microarrays produce .CEL files
                        sample_object.technology = "MICROARRAY"
                        sample_object.manufacturer = "AFFYMETRIX"
                        sample_object.save()
                # It's okay to survey RNA-Seq samples from GEO, but we
                # don't actually want to download/process any RNA-Seq
                # data unless it comes from SRA.
                if sample_object.technology != "RNA-SEQ":
                    created_samples.append(sample_object)
                # Now that we've determined the technology at the
                # sample level, we can set it at the experiment level,
                # just gotta make sure to only do it once. There can
                # be more than one technology, this should be changed
                # as part of:
                # https://github.com/AlexsLemonade/refinebio/issues/1099
                if not experiment_object.technology:
                    experiment_object.technology = sample_object.technology
                    experiment_object.save()
                ExperimentSampleAssociation.objects.get_or_create(
                    experiment=experiment_object, sample=sample_object
                )
        # These supplementary files _may-or-may-not_ contain the type of raw data we can process.
        # NOTE(review): `sample_object` below is whatever the last loop iteration
        # left bound -- presumably every GSE has at least one sample, otherwise
        # this would raise a NameError. Confirm.
        for experiment_supplement_url in gse.metadata.get("supplementary_file", []):
            # filename and source_filename are the same for these
            filename = experiment_supplement_url.split("/")[-1]
            original_file = OriginalFile.objects.get_or_create(
                source_url=experiment_supplement_url,
                filename=filename,
                source_filename=filename,
                has_raw=sample_object.has_raw,
                is_archive=True,
            )[0]
            logger.debug("Created OriginalFile: " + str(original_file))
            lower_supplement_url = experiment_supplement_url.lower()
            if (
                ("_non_normalized.txt" in lower_supplement_url)
                or ("_non-normalized.txt" in lower_supplement_url)
                or ("-non-normalized.txt" in lower_supplement_url)
                or ("-non_normalized.txt" in lower_supplement_url)
            ):
                for sample_object in created_samples:
                    sample_object.has_raw = True
                    sample_object.save()
                    OriginalFileSampleAssociation.objects.get_or_create(
                        sample=sample_object, original_file=original_file
                    )
            # Delete this Original file if it isn't being used.
            if (
                OriginalFileSampleAssociation.objects.filter(original_file=original_file).count()
                == 0
            ):
                original_file.delete()
        # These are the Miniml/Soft/Matrix URLs that are always(?) provided.
        # GEO describes different types of data formatting as "families"
        family_url = self.get_miniml_url(experiment_accession_code)
        miniml_original_file = OriginalFile.objects.get_or_create(
            source_url=family_url,
            source_filename=family_url.split("/")[-1],
            has_raw=sample_object.has_raw,
            is_archive=True,
        )[0]
        for sample_object in created_samples:
            # We don't need a .txt if we have a .CEL
            if sample_object.has_raw:
                continue
            OriginalFileSampleAssociation.objects.get_or_create(
                sample=sample_object, original_file=miniml_original_file
            )
        # Delete this Original file if it isn't being used.
        if (
            OriginalFileSampleAssociation.objects.filter(original_file=miniml_original_file).count()
            == 0
        ):
            miniml_original_file.delete()
        # Trash the temp path
        try:
            shutil.rmtree(self.get_temp_path())
        except Exception:
            # There was a problem during surveying so this didn't get created.
            # It's not a big deal.
            pass
        return experiment_object, created_samples

    def discover_experiment_and_samples(self) -> (Experiment, List[Sample]):
        """ Dispatches the surveyor, returns the results """
        experiment_accession_code = SurveyJobKeyValue.objects.get(
            survey_job_id=self.survey_job.id, key__exact="experiment_accession_code"
        ).value
        logger.debug(
            "Surveying experiment with accession code: %s.",
            experiment_accession_code,
            survey_job=self.survey_job.id,
        )
        experiment, samples = self.create_experiment_and_samples_from_api(experiment_accession_code)
        return experiment, samples
<|code_end|>
workers/data_refinery_workers/processors/illumina.py
<|code_start|>import csv
import multiprocessing
import os
import re
import subprocess
from typing import Dict
from django.utils import timezone
import numpy as np
import pandas as pd
from data_refinery_common.job_lookup import PipelineEnum, ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Pipeline,
ProcessorJob,
ProcessorJobOriginalFileAssociation,
Sample,
SampleAnnotation,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
logger = get_and_configure_logger(__name__)
def _prepare_files(job_context: Dict) -> Dict:
    """Add the "input_file_path" and "output_file_path" keys to
    job_context so everything is prepared for processing.
    """

    def _is_data_line(line: str) -> bool:
        # Keep only tab-delimited data rows: drop blank lines, anything
        # containing '#', GSM header rows, and lines starting with
        # quote/comment/markup characters so R doesn't choke.
        if "#" in line or "\t" not in line:
            return False
        if line.strip() == "" or line == "\n":
            return False
        if line[0:3].upper() == "GSM":
            return False
        return line[0] not in ("'", '"', "!", "/", "<", "\t")

    # All files for the job are in the same directory.
    work_dir = LOCAL_ROOT_DIR + "/" + "processor_job_" + str(job_context["job_id"]) + "/"
    job_context["work_dir"] = work_dir
    os.makedirs(work_dir, exist_ok=True)

    original_file = job_context["original_files"][0]
    base_name = original_file.absolute_file_path.split("/")[-1]
    job_context["input_file_path"] = work_dir + base_name + ".sanitized"
    job_context["output_file_path"] = work_dir + base_name.replace(".txt", ".PCL")

    # Sanitize this file so R doesn't choke.
    # Some have comments, some have non-comment-comments.
    with open(original_file.absolute_file_path, "r") as file_input:
        with open(job_context["input_file_path"], "w") as file_output:
            for line in file_input:
                if _is_data_line(line):
                    file_output.write(line)

    return job_context
def _detect_columns(job_context: Dict) -> Dict:
""" Detect which columns match to which inputs.
Related: https://github.com/AlexsLemonade/refinebio/issues/86#issuecomment-379308817
We need to find:
First column should be ID_REF or PROBE_ID and the type should be string.
Detection Pval column
Expression column (contains sample title and NOT 'BEAD')
Header examples:
['ID_REF', 'LV-C&si-Control-1', 'Detection Pval',
'LV-C&si-Control-2', 'Detection Pval', 'LV-C&si-Control-3', 'Detection
Pval', 'LV-C&si-EZH2-1', 'Detection Pval', 'LV-C&si-EZH2-2', 'Detection
Pval', 'LV-C&si-EZH2-3', 'Detection Pval', 'LV-EZH2&si-EZH2-1',
'Detection Pval', 'LV-EZH2&si-EZH2-2', 'Detection Pval', 'LV-EZH2&si-
EZH2-3', 'Detection Pval', 'LV-T350A&si-EZH2-1', 'Detection Pval', 'LV-
T350A&si-EZH2-2', 'Detection Pval', 'LV-T350A&si-EZH2-3', 'Detection
Pval']
Adds the following keys to job_context:
columnIds: the identifiers of columns which contain expression data
probeId: which is the value of the column containing the probe identifiers.
detectionPval: a string which identifies Pvalue columns
"""
try:
input_file = job_context["input_file_path"]
headers = None
with open(input_file, "r") as tsv_in:
tsv_in = csv.reader(tsv_in, delimiter="\t")
for row in tsv_in:
headers = row
break
# Ex GSE45331_non-normalized.txt
predicted_header = 0
if headers[0].upper() in ["TARGETID", "TARGET_ID"]:
predicted_header = 1
# First the probe ID column
if headers[predicted_header].upper() not in [
"ID_REF",
"PROBE_ID",
"IDREF",
"PROBEID",
"REF_ID",
"REFID",
"IDPROBE",
"ID_PROBE",
]:
job_context["job"].failure_reason = (
"Could not find any ID column in headers "
+ str(headers)
+ " for file "
+ job_context["input_file_path"]
)
job_context["success"] = False
return job_context
else:
job_context["probeId"] = headers[predicted_header]
# Then check to make sure a detection pvalue exists, which is always(?) some form of
# 'Detection Pval'
for header in headers:
# check if header contains something like "detection pval"
pvalue_header = re.search(r"(detection)(\W?)(pval\w*)", header, re.IGNORECASE)
if pvalue_header:
break
else:
job_context["job"].failure_reason = "Could not detect PValue column!"
job_context["success"] = False
job_context["job"].no_retry = True
return job_context
# Then, finally, create an absolutely bonkers regular expression
# which will explicitly hit on any sample which contains a sample
# ID _and_ ignores the magical word 'BEAD', etc. Great!
column_ids = set()
for sample in job_context["samples"]:
for offset, header in enumerate(headers, start=1):
if sample.title == header:
column_ids.add(offset)
continue
# Sometimes the title might actually be in the description field.
# To find this, look in all the related SampleAnnotations.
# Since there are multiple annotations, we need to break early before continuing.
# Related: https://github.com/AlexsLemonade/refinebio/issues/499
continue_me = False
for annotation in sample.sampleannotation_set.filter(is_ccdl=False):
try:
if annotation.data.get("description", "")[0] == header:
column_ids.add(offset)
continue_me = True
break
except Exception:
pass
if continue_me:
# Treat the header as the real title, as we will need it later.
sample.title = header
sample.save()
continue
if header.upper().replace(" ", "_") == "RAW_VALUE":
columns_ids.add(offset)
continue
if (
sample.title.upper() in header.upper()
and "BEAD" not in header.upper()
and "NARRAYS" not in header.upper()
and "ARRAY_STDEV" not in header.upper()
and "PVAL" not in header.upper().replace(" ", "").replace("_", "")
):
column_ids.add(offset)
continue
for offset, header in enumerate(headers, start=1):
if "AVG_Signal" in header:
column_ids.add(offset)
continue
job_context["columnIds"] = ",".join(map(lambda id: str(id), column_ids))
except Exception as e:
job_context["job"].failure_reason = str(e)
job_context["success"] = False
logger.exception(
"Failed to extract columns in " + job_context["input_file_path"], exception=str(e)
)
job_context["job"].no_retry = True
return job_context
return job_context
def _detect_platform(job_context: Dict) -> Dict:
    """
    Determine the platform/database to process this sample with.
    They often provide something like "V2" or "V 2", but we don't trust them so we detect it ourselves.
    Related: https://github.com/AlexsLemonade/refinebio/issues/232

    Sets job_context["platform"] on a confident match; otherwise requeues the
    file as a NO_OP job and sets job_context["abort"] = True.
    """
    # Candidate Illumina annotation packages per organism.
    all_databases = {
        "HOMO_SAPIENS": [
            "illuminaHumanv1",
            "illuminaHumanv2",
            "illuminaHumanv3",
            "illuminaHumanv4",
        ],
        "MUS_MUSCULUS": ["illuminaMousev1", "illuminaMousev1p1", "illuminaMousev2",],
        "RATTUS_NORVEGICUS": ["illuminaRatv1"],
    }
    sample0 = job_context["samples"][0]
    databases = all_databases[sample0.organism.name]
    # Loop over all of the possible platforms and find the one with the best match.
    highest = 0.0
    high_mapped_percent = 0.0
    high_db = None
    for platform in databases:
        try:
            # detect_database.R prints the detection percentage on its first
            # output line and the mapped percentage on the second.
            result = subprocess.check_output(
                [
                    "/usr/bin/Rscript",
                    "--vanilla",
                    "/home/user/data_refinery_workers/processors/detect_database.R",
                    "--platform",
                    platform,
                    "--inputFile",
                    job_context["input_file_path"],
                    "--column",
                    job_context["probeId"],
                ]
            )
            results = result.decode().split("\n")
            cleaned_result = float(results[0].strip())
            if cleaned_result > highest:
                highest = cleaned_result
                high_db = platform
                high_mapped_percent = float(results[1].strip())
        except Exception as e:
            # A failure on one candidate platform shouldn't abort detection
            # of the remaining ones.
            logger.exception(e, processor_job_id=job_context["job"].id)
            continue
    # Record our sample detection outputs for every sample.
    for sample in job_context["samples"]:
        sa = SampleAnnotation()
        sa.sample = sample
        sa.is_ccdl = True
        sa.data = {
            "detected_platform": high_db,
            "detection_percentage": highest,
            "mapped_percentage": high_mapped_percent,
        }
        sa.save()
    # If the match is over 75%, record this and process it on that platform.
    if high_mapped_percent > 75.0:
        job_context["platform"] = high_db
    # The match percentage is too low - send this to the no-opper instead.
    else:
        logger.info("Match percentage too low, NO_OP'ing and aborting.", job=job_context["job_id"])
        # Requeue the original file as a NO_OP job with the same volume/RAM.
        processor_job = ProcessorJob()
        processor_job.pipeline_applied = "NO_OP"
        processor_job.volume_index = job_context["job"].volume_index
        processor_job.ram_amount = job_context["job"].ram_amount
        processor_job.save()
        assoc = ProcessorJobOriginalFileAssociation()
        assoc.original_file = job_context["original_files"][0]
        assoc.processor_job = processor_job
        assoc.save()
        try:
            send_job(ProcessorPipeline.NO_OP, processor_job)
        except Exception as e:
            # Nomad dispatch error, likely during local test.
            logger.error(e, job=processor_job)
        job_context["abort"] = True
    return job_context
def _run_illumina(job_context: Dict) -> Dict:
    """Process the input TXT file into an output PCL file via illumina.R.

    Expects a job_context pre-populated with the input/output paths, the
    detected platform and the column identifiers the R script needs.
    """
    try:
        job_context["time_start"] = timezone.now()
        command = [
            "/usr/bin/Rscript",
            "--vanilla",
            "/home/user/data_refinery_workers/processors/illumina.R",
            "--probeId",
            job_context["probeId"],
            "--expression",
            job_context["columnIds"],
            "--platform",
            job_context["platform"],
            "--inputFile",
            job_context["input_file_path"],
            "--outputFile",
            job_context["output_file_path"],
            "--cores",
            str(multiprocessing.cpu_count()),
        ]
        subprocess.check_output(command)
        job_context["formatted_command"] = " ".join(command)
        job_context["time_end"] = timezone.now()
    except Exception as e:
        error_message = (
            "Encountered error in R code while running illumina.R"
            " pipeline during processing of {0}: {1}"
        ).format(job_context["input_file_path"], str(e))
        logger.error(error_message, processor_job=job_context["job_id"])
        job_context["job"].failure_reason = error_message
        job_context["success"] = False
    return job_context
def _get_sample_for_column(column: str, job_context: Dict) -> Sample:
    """Return the Sample matching a column header, or None.

    Tries, in order: the sample title itself, the
    <SAMPLE_TITLE>(.AVG)?_Signal naming convention, and finally a sample
    whose GEO annotations mention a matching "<name>.Detection Pval" column.
    """
    samples = job_context["samples"]

    # First of all check if the title is the column name.
    try:
        return samples.get(title=column)
    except Sample.DoesNotExist:
        pass

    # If the column name is not the title, maybe they used the convention
    # <SAMPLE_TITLE>(.AVG)?_Signal.
    title_match = re.match(r"(?P<title>.*?)(\.AVG)?_Signal", column)
    if title_match is not None:
        try:
            return samples.get(title=title_match.group("title"))
        except Sample.DoesNotExist:
            pass

    # Or maybe they also have a named detection pvalue column using the same
    # naming scheme.
    name_match = re.match(r"(?P<name>.*)\.AVG_Signal", column)
    if name_match is not None:
        pval_column = "{}.Detection Pval".format(name_match.group("name"))
        try:
            return samples.get(sampleannotation__data__geo_columns__contains=pval_column)
        except Sample.DoesNotExist:
            pass

    return None
def _create_result_objects(job_context: Dict) -> Dict:
    """ Create the ComputationalResult objects after a Scan run is complete.

    Splits the big output TSV into one smashable per-sample file, creating a
    ComputedFile plus sample associations for each column.
    """
    result = ComputationalResult()
    result.commands.append(job_context["formatted_command"])
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context["time_start"]
    result.time_end = job_context["time_end"]
    try:
        processor_key = "ILLUMINA_SCAN"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()
    job_context["pipeline"].steps.append(result.id)
    # Split the result into smashable subfiles
    big_tsv = job_context["output_file_path"]
    data = pd.read_csv(big_tsv, sep="\t", header=0, index_col=0)
    individual_files = []
    # One single-column frame per sample column.
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        # Strip shell-unfriendly characters from the column name when
        # building the per-sample filename.
        filename = (
            frame.columns.values[0].replace("&", "").replace("*", "").replace(";", "") + ".tsv"
        )
        frame_path = job_context["work_dir"] + filename
        frame.to_csv(frame_path, sep="\t", encoding="utf-8")
        # This needs to be the same as the ones in the job context!
        sample = _get_sample_for_column(frame.columns.values[0], job_context)
        if sample is None:
            job_context["job"].failure_reason = (
                "Could not find sample for column "
                + frame.columns.values[0]
                + " while splitting Illumina file "
                + big_tsv
            )
            job_context["success"] = False
            job_context["job"].no_retry = True
            return job_context
        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = frame_path.split("/")[-1]
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context["computed_files"].append(computed_file)
        SampleResultAssociation.objects.get_or_create(sample=sample, result=result)
        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample, computed_file=computed_file
        )
        individual_files.append(computed_file)
    logger.debug("Created %s", result)
    job_context["success"] = True
    job_context["individual_files"] = individual_files
    job_context["result"] = result
    return job_context
def illumina_to_pcl(job_id: int) -> None:
    """Entry point: run the full Illumina-to-PCL pipeline for a processor job."""
    pipeline = Pipeline(name=PipelineEnum.ILLUMINA.value)
    job_context = {"job_id": job_id, "pipeline": pipeline}
    steps = [
        utils.start_job,
        _prepare_files,
        _detect_columns,
        _detect_platform,
        _run_illumina,
        _create_result_objects,
        utils.end_job,
    ]
    return utils.run_pipeline(job_context, steps)
<|code_end|>
|
Validate that samples added to the dataset are downloadable
### Context
We allow datasets like [this](https://www.refine.bio/dataset/98e449b3-b485-4786-8529-79dc50e4cd5b) to be created.
### Problem or idea
This dataset has *at least one experiment* and it also has *at least one sample per experiment*, but it still isn't valid because the samples are not downloadable.
### Solution or next step
The API should also verify that all the samples added to the dataset are downloadable.
| api/data_refinery_api/serializers.py
<|code_start|>from collections import defaultdict
from django.db.models import Count, Q
from rest_framework import serializers
import boto3
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from data_refinery_common.models import (
APIToken,
CompendiumResult,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentAnnotation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
Sample,
SampleAnnotation,
SurveyJob,
)
from data_refinery_common.models.documents import ExperimentDocument
# Module-level boto3 S3 client shared within this module.
s3 = boto3.client("s3")
##
# Organism
##
class OrganismSerializer(serializers.ModelSerializer):
    """Minimal read serializer for an Organism."""

    class Meta:
        model = Organism
        fields = (
            "name",
            "taxonomy_id",
        )
##
# Processor
##
class ProcessorSerializer(serializers.ModelSerializer):
    """Read serializer for a Processor (name/version/environment)."""

    class Meta:
        model = Processor
        fields = ("id", "name", "version", "docker_image", "environment")
##
# Transcriptome Index
##
class OrganismIndexSerializer(serializers.ModelSerializer):
    """Read serializer for a transcriptome index, with a download URL."""

    organism_name = serializers.StringRelatedField(source="organism", read_only=True)
    download_url = serializers.SerializerMethodField()

    class Meta:
        model = OrganismIndex
        fields = (
            "id",
            "assembly_name",
            "organism_name",
            "source_version",
            "index_type",
            "salmon_version",
            "download_url",
            "result_id",
            "last_modified",
        )
        read_only_fields = fields

    def get_download_url(self, obj):
        # Only indices whose computed file exists have a URL.
        computed_file = obj.get_computed_file()
        if computed_file is not None:
            return computed_file.s3_url
        return None
##
# Results
##
class DetailedExperimentSampleSerializer(serializers.ModelSerializer):
    """Compact sample representation embedded in experiment detail views."""

    class Meta:
        model = Sample
        fields = (
            "accession_code",
            "platform_name",
            "pretty_platform",
            "technology",
            "is_processed",
        )
class ComputationalResultAnnotationSerializer(serializers.ModelSerializer):
    """Read serializer for annotations attached to a computational result."""

    class Meta:
        model = ComputationalResultAnnotation
        fields = ("id", "data", "is_ccdl", "created_at", "last_modified")
class ComputedFileSerializer(serializers.ModelSerializer):
    """Read serializer for a computed file (no download URL exposed)."""

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "is_smashable",
            "is_qc",
            "sha1",
            "s3_bucket",
            "s3_key",
            "created_at",
            "last_modified",
        )
class ComputedFileWithUrlSerializer(serializers.ModelSerializer):
    """Same as ComputedFileSerializer plus the `download_url` field."""

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "is_smashable",
            "is_qc",
            "sha1",
            "s3_bucket",
            "s3_key",
            "download_url",
            "created_at",
            "last_modified",
        )
class ComputationalResultSerializer(serializers.ModelSerializer):
    """Full read serializer for a computational result; files carry no URLs."""

    annotations = ComputationalResultAnnotationSerializer(
        many=True, source="computationalresultannotation_set"
    )
    processor = ProcessorSerializer(many=False)
    organism_index = OrganismIndexSerializer(many=False)
    files = ComputedFileSerializer(many=True, source="computedfile_set")

    class Meta:
        model = ComputationalResult
        fields = (
            "id",
            "commands",
            "processor",
            "is_ccdl",
            "annotations",
            "files",
            "organism_index",
            "time_start",
            "time_end",
            "created_at",
            "last_modified",
        )
class ComputationalResultWithUrlSerializer(ComputationalResultSerializer):
    """Same as ComputationalResultSerializer but files include download URLs."""

    files = ComputedFileWithUrlSerializer(many=True, source="computedfile_set")
class ComputationalResultNoFilesSerializer(serializers.ModelSerializer):
    """Computational result without the (potentially large) file list."""

    annotations = ComputationalResultAnnotationSerializer(
        many=True, source="computationalresultannotation_set"
    )
    processor = ProcessorSerializer(many=False)
    organism_index = OrganismIndexSerializer(many=False)

    class Meta:
        model = ComputationalResult
        fields = (
            "id",
            "commands",
            "processor",
            "is_ccdl",
            "annotations",
            "organism_index",
            "time_start",
            "time_end",
            "created_at",
            "last_modified",
        )
class QNTargetSerializer(serializers.ModelSerializer):
    """Read serializer for quantile-normalization target files."""

    result = ComputationalResultNoFilesSerializer(many=False)

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "is_qn_target",
            "sha1",
            "s3_bucket",
            "s3_key",
            "s3_url",
            "created_at",
            "last_modified",
            "result",
        )
class ComputedFileListSerializer(serializers.ModelSerializer):
    """List serializer for computed files (compendia etc.).

    `download_url` is only included when a valid token is present in the
    serializer context.
    """

    result = ComputationalResultNoFilesSerializer(many=False)
    samples = DetailedExperimentSampleSerializer(many=True)
    compendia_organism_name = serializers.CharField(
        source="compendia_organism__name", read_only=True
    )

    def __init__(self, *args, **kwargs):
        super(ComputedFileListSerializer, self).__init__(*args, **kwargs)
        if "context" in kwargs:
            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "samples",
            "size_in_bytes",
            "is_qn_target",
            "is_smashable",
            "is_qc",
            "is_compendia",
            "quant_sf_only",
            "compendia_version",
            "compendia_organism_name",
            "sha1",
            "s3_bucket",
            "s3_key",
            "s3_url",
            "download_url",
            "created_at",
            "last_modified",
            "result",
        )
        extra_kwargs = {
            "download_url": {
                "help_text": "This will contain an url to download the file. You must send a valid [token](#tag/token) in order to receive this."
            }
        }
class OriginalFileListSerializer(serializers.ModelSerializer):
    """List serializer for original files, including their job relations."""

    class Meta:
        model = OriginalFile
        # Bug fix: "samples" was listed twice in this tuple; the duplicate
        # entry has been removed.
        fields = (
            "id",
            "filename",
            "samples",
            "size_in_bytes",
            "sha1",
            "processor_jobs",
            "downloader_jobs",
            "source_url",
            "is_archive",
            "source_filename",
            "has_raw",
            "created_at",
            "last_modified",
        )
##
# Samples
##
class SampleSerializer(serializers.ModelSerializer):
    """List serializer for samples."""

    organism = OrganismSerializer(many=False)

    class Meta:
        model = Sample
        fields = (
            "id",
            "title",
            "accession_code",
            "source_database",
            "organism",
            "platform_accession_code",
            "platform_name",
            "pretty_platform",
            "technology",
            "manufacturer",
            "protocol_info",
            "is_processed",
            "created_at",
            "last_modified",
        )
class SampleAnnotationSerializer(serializers.ModelSerializer):
    """Read serializer for raw sample annotations."""

    class Meta:
        model = SampleAnnotation
        fields = (
            "data",
            "is_ccdl",
            "created_at",
            "last_modified",
        )
class DetailedSamplesComputationalResultSerializer(serializers.ModelSerializer):
    """Compact result representation embedded in sample detail views."""

    processor = ProcessorSerializer(many=False)
    organism_index = OrganismIndexSerializer(many=False)

    class Meta:
        model = ComputationalResult
        fields = (
            "id",
            "processor",
            "organism_index",
        )
class DetailedSampleSerializer(serializers.ModelSerializer):
    """Detail serializer for a single sample with annotations and results."""

    annotations = SampleAnnotationSerializer(many=True, source="sampleannotation_set")
    organism = OrganismSerializer(many=False)
    results = DetailedSamplesComputationalResultSerializer(many=True)

    class Meta:
        model = Sample
        fields = (
            "id",
            "title",
            "accession_code",
            "source_database",
            "organism",
            "platform_accession_code",
            "platform_name",
            "pretty_platform",
            "technology",
            "manufacturer",
            "protocol_info",
            "annotations",
            "results",
            "source_archive_url",
            "has_raw",
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
            "is_processed",
            "created_at",
            "last_modified",
            "original_files",
            "computed_files",
        )
##
# Experiments
##
class ExperimentSerializer(serializers.ModelSerializer):
    """List serializer for experiments with aggregate sample counts."""

    organism_names = serializers.StringRelatedField(many=True, source="organisms", read_only=True)
    platforms = serializers.ReadOnlyField()
    processed_samples = serializers.StringRelatedField(many=True)
    total_samples_count = serializers.IntegerField(read_only=True)
    sample_metadata = serializers.ReadOnlyField(source="get_sample_metadata_fields")
    technologies = serializers.ReadOnlyField(source="get_sample_technologies")
    pretty_platforms = serializers.ReadOnlyField()

    class Meta:
        model = Experiment
        fields = (
            "id",
            "title",
            "description",
            "accession_code",
            "alternate_accession_code",
            "source_database",
            "source_url",
            "platforms",
            "pretty_platforms",
            "processed_samples",
            "has_publication",
            "publication_title",
            "publication_doi",
            "publication_authors",
            "pubmed_id",
            "total_samples_count",
            "organism_names",
            "submitter_institution",
            "created_at",
            "last_modified",
            "source_first_published",
            "source_last_modified",
            "sample_metadata",
            "technologies",
        )

    @staticmethod
    def setup_eager_loading(queryset):
        """ Perform necessary eager loading of data. """
        queryset = queryset.prefetch_related("samples").prefetch_related("organisms")

        # Multiple count annotations.
        # Bug fix: `unique=True` is not a Count() argument — it was silently
        # swallowed by Aggregate's **extra and ignored. `distinct=True` is
        # the intended option and also guards against row multiplication
        # when combining multiple aggregations.
        queryset = queryset.annotate(
            total_samples_count=Count("samples", distinct=True),
            processed_samples_count=Count(
                "samples", distinct=True, filter=Q(samples__is_processed=True)
            ),
        )
        return queryset
class ExperimentAnnotationSerializer(serializers.ModelSerializer):
    """Read serializer for raw experiment annotations."""

    class Meta:
        model = ExperimentAnnotation
        fields = (
            "data",
            "is_ccdl",
            "created_at",
            "last_modified",
        )
class DetailedExperimentSerializer(serializers.ModelSerializer):
    """Detail serializer for a single experiment with samples/annotations."""

    annotations = ExperimentAnnotationSerializer(many=True, source="experimentannotation_set")
    samples = DetailedExperimentSampleSerializer(many=True)
    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")
    organism_names = serializers.StringRelatedField(many=True, source="organisms", read_only=True)

    class Meta:
        model = Experiment
        fields = (
            "id",
            "title",
            "description",
            "annotations",
            "samples",
            "protocol_description",
            "accession_code",
            "source_database",
            "source_url",
            "has_publication",
            "publication_title",
            "publication_doi",
            "publication_authors",
            "pubmed_id",
            "source_first_published",
            "source_last_modified",
            "submitter_institution",
            "last_modified",
            "created_at",
            "organism_names",
            "sample_metadata",
            "num_total_samples",
            "num_processed_samples",
            "num_downloadable_samples",
        )
class PlatformSerializer(serializers.ModelSerializer):
    """Platform accession/name pairs (derived from samples)."""

    class Meta:
        model = Sample
        fields = (
            "platform_accession_code",
            "platform_name",
        )
class InstitutionSerializer(serializers.ModelSerializer):
    """Submitter institution only."""

    class Meta:
        model = Experiment
        fields = ("submitter_institution",)
##
# Files
##
class OriginalFileSerializer(serializers.ModelSerializer):
    """Read serializer for an original (source) file."""

    class Meta:
        model = OriginalFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "sha1",
            "source_url",
            "source_filename",
            "is_downloaded",
            "is_archive",
            "has_raw",
            "created_at",
            "last_modified",
        )
##
# Jobs
##
class SurveyJobSerializer(serializers.ModelSerializer):
    """Read serializer for surveyor jobs."""

    class Meta:
        model = SurveyJob
        fields = (
            "id",
            "source_type",
            "success",
            "start_time",
            "end_time",
            "created_at",
            "last_modified",
        )
class DownloaderJobSerializer(serializers.ModelSerializer):
    """Read serializer for downloader jobs, including Nomad metadata."""

    class Meta:
        model = DownloaderJob
        fields = (
            "id",
            "downloader_task",
            "num_retries",
            "retried",
            "was_recreated",
            "worker_id",
            "worker_version",
            "nomad_job_id",
            "failure_reason",
            "success",
            "original_files",
            "start_time",
            "end_time",
            "created_at",
            "last_modified",
        )
class ProcessorJobSerializer(serializers.ModelSerializer):
    """Read serializer for processor jobs, including Nomad metadata."""

    class Meta:
        model = ProcessorJob
        fields = (
            "id",
            "pipeline_applied",
            "num_retries",
            "retried",
            "worker_id",
            "ram_amount",
            "volume_index",
            "worker_version",
            "failure_reason",
            "nomad_job_id",
            "success",
            "original_files",
            "datasets",
            "start_time",
            "end_time",
            "created_at",
            "last_modified",
        )
##
# Datasets
##
def validate_dataset(data):
    """Basic dataset validation. Currently only checks formatting, not values.

    `data["data"]` must be a dict mapping experiment accession codes to
    non-empty lists of unique sample accession codes.

    Raises:
        serializers.ValidationError: on any formatting problem.
    """
    if data.get("data") is None:
        raise serializers.ValidationError("`data` must be a dict of lists.")

    if type(data["data"]) != dict:
        raise serializers.ValidationError("`data` must be a dict of lists.")

    # A dataset may only be started once it contains something.
    if data.get("start") and len(data["data"]) == 0:
        raise serializers.ValidationError("`data` must contain at least one experiment.")

    for key, value in data["data"].items():
        if type(value) != list:
            raise serializers.ValidationError(
                "`data` must be a dict of lists. Problem with `" + str(key) + "`"
            )

        if len(value) < 1:
            raise serializers.ValidationError(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )

        # Bug fix: the duplicate-values ValidationError used to be raised
        # inside this try block, so it was immediately caught and re-wrapped
        # as "Received bad dataset data: ...", masking the real message.
        try:
            has_duplicates = len(value) != len(set(value))
        except Exception as e:
            # set() raises for unhashable entries (e.g. nested lists).
            raise serializers.ValidationError("Received bad dataset data: " + str(e))

        if has_duplicates:
            raise serializers.ValidationError("Duplicate values detected in " + str(value))
class CreateDatasetSerializer(serializers.ModelSerializer):
    """Serializer used to create a new Dataset (id/data/email fields only)."""

    class Meta:
        model = Dataset
        fields = ("id", "data", "email_address", "email_ccdl_ok")

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        # validate_dataset raises serializers.ValidationError itself; the
        # previous `try: ... except Exception: raise` wrapper was a no-op.
        validate_dataset(data)
        return data
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
    """All of the information about an experiment needed for the download page."""

    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")

    class Meta:
        model = Experiment
        fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
    """Full serializer for a Dataset.

    The fields `experiments`, `organism_samples` and `worker_version` are
    only included when `?details=true` is passed; `download_url` is only
    included when a valid token is supplied in the serializer context.
    """

    start = serializers.NullBooleanField(required=False)
    experiments = DatasetDetailsExperimentSerializer(
        source="get_experiments", many=True, read_only=True
    )
    organism_samples = serializers.SerializerMethodField(read_only=True)
    worker_version = serializers.SerializerMethodField(read_only=True)

    def __init__(self, *args, **kwargs):
        super(DatasetSerializer, self).__init__(*args, **kwargs)
        if "context" in kwargs:
            if "request" in kwargs["context"]:
                # only include the fields `experiments` and `organism_samples` when the param `?details=true`
                # is provided. This is used on the frontend to render the downloads page
                # thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
                if "details" not in kwargs["context"]["request"].query_params:
                    self.fields.pop("experiments")
                    self.fields.pop("organism_samples")
                    self.fields.pop("worker_version")
            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    class Meta:
        model = Dataset
        fields = (
            "id",
            "data",
            "aggregate_by",
            "scale_by",
            "is_processing",
            "is_processed",
            "is_available",
            "has_email",
            "expires_on",
            "s3_bucket",
            "s3_key",
            "success",
            "failure_reason",
            "created_at",
            "last_modified",
            "start",
            "size_in_bytes",
            "sha1",
            "experiments",
            "organism_samples",
            "download_url",
            "quantile_normalize",
            "quant_sf_only",
            "svd_algorithm",
            "worker_version",
        )
        extra_kwargs = {
            "data": {"required": True,},
            "id": {"read_only": True,},
            "is_processing": {"read_only": True,},
            "is_processed": {"read_only": True,},
            "is_available": {"read_only": True,},
            "expires_on": {"read_only": True,},
            "s3_bucket": {"read_only": True,},
            "s3_key": {"read_only": True,},
            "success": {"read_only": True,},
            "failure_reason": {"read_only": True,},
            "created_at": {"read_only": True,},
            "last_modified": {"read_only": True,},
            "size_in_bytes": {"read_only": True,},
            "sha1": {"read_only": True,},
            "download_url": {"read_only": True,},
            "worker_version": {
                "read_only": True,
                "help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
            },
        }

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        # validate_dataset raises serializers.ValidationError itself; the
        # previous `try: ... except Exception: raise` wrapper was a no-op.
        validate_dataset(data)
        return data

    def get_organism_samples(self, obj):
        """
        Groups the sample accession codes inside a dataset by their organisms, eg:
        { HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
        Useful to avoid sending sample information on the downloads page
        """
        samples = (
            obj.get_samples()
            .prefetch_related("organism")
            .values("organism__name", "accession_code")
            .order_by("organism__name", "accession_code")
        )
        result = defaultdict(list)
        for sample in samples:
            result[sample["organism__name"]].append(sample["accession_code"])
        return result

    def get_worker_version(self, obj):
        """Return the worker version of the most recently created processor job."""
        processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
            "worker_version", flat=True
        )
        if processor_jobs:
            return processor_jobs[0]
        else:
            return None
class APITokenSerializer(serializers.ModelSerializer):
    """Serializer for API tokens; only `is_activated` is writable."""

    class Meta:
        model = APIToken
        fields = ("id", "is_activated", "terms_and_conditions")
        extra_kwargs = {
            "id": {"read_only": True},
            "is_activated": {"read_only": False},
            "terms_and_conditions": {"read_only": True},
        }
class CompendiumResultSerializer(serializers.ModelSerializer):
    """Read serializer for a compendium result (no download URL)."""

    primary_organism_name = serializers.StringRelatedField(
        read_only=True, source="primary_organism"
    )
    organism_names = serializers.StringRelatedField(many=True, source="organisms", read_only=True)
    computed_file = ComputedFileSerializer(source="get_computed_file", read_only=True)

    class Meta:
        model = CompendiumResult
        fields = (
            "id",
            "primary_organism_name",
            "organism_names",
            "svd_algorithm",
            "quant_sf_only",
            "compendium_version",
            "computed_file",
        )
        read_only_fields = fields
class CompendiumResultWithUrlSerializer(CompendiumResultSerializer):
    """Same as CompendiumResultSerializer but the computed file includes a
    download URL.

    Consistency: inherits and overrides only `computed_file`, mirroring the
    ComputationalResultSerializer / ComputationalResultWithUrlSerializer pair
    instead of duplicating the whole class body (Meta is inherited).
    """

    computed_file = ComputedFileWithUrlSerializer(source="get_computed_file", read_only=True)
##
# ElasticSearch Document Serializers
##
class ExperimentDocumentSerializer(DocumentSerializer):
    """Serializer for the Experiment Elasticsearch document."""

    class Meta(object):
        """Meta options."""

        document = ExperimentDocument
        fields = (
            "id",
            "title",
            "publication_title",
            "description",
            "technology",
            "accession_code",
            "alternate_accession_code",
            "submitter_institution",
            "has_publication",
            "publication_doi",
            "publication_authors",
            "sample_metadata_fields",
            "platform_names",
            "platform_accession_codes",
            "organism_names",
            "pubmed_id",
            "num_total_samples",
            "num_processed_samples",
            "num_downloadable_samples",
            "source_first_published",
        )
        read_only_fields = fields
<|code_end|>
| api/data_refinery_api/serializers.py
<|code_start|>from collections import defaultdict
from django.db.models import Count, Q
from rest_framework import serializers
import boto3
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from data_refinery_common.models import (
APIToken,
CompendiumResult,
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentAnnotation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
Sample,
SampleAnnotation,
SurveyJob,
)
from data_refinery_common.models.documents import ExperimentDocument
# Module-level boto3 S3 client shared within this module.
s3 = boto3.client("s3")
##
# Organism
##
class OrganismSerializer(serializers.ModelSerializer):
    """Minimal read serializer for an Organism."""

    class Meta:
        model = Organism
        fields = (
            "name",
            "taxonomy_id",
        )
##
# Processor
##
class ProcessorSerializer(serializers.ModelSerializer):
    """Read serializer for a Processor (name/version/environment)."""

    class Meta:
        model = Processor
        fields = ("id", "name", "version", "docker_image", "environment")
##
# Transcriptome Index
##
class OrganismIndexSerializer(serializers.ModelSerializer):
    """Read serializer for a transcriptome index, with a download URL."""

    organism_name = serializers.StringRelatedField(source="organism", read_only=True)
    download_url = serializers.SerializerMethodField()

    class Meta:
        model = OrganismIndex
        fields = (
            "id",
            "assembly_name",
            "organism_name",
            "source_version",
            "index_type",
            "salmon_version",
            "download_url",
            "result_id",
            "last_modified",
        )
        read_only_fields = fields

    def get_download_url(self, obj):
        # Only indices whose computed file exists have a URL.
        computed_file = obj.get_computed_file()
        if computed_file is not None:
            return computed_file.s3_url
        return None
##
# Results
##
class DetailedExperimentSampleSerializer(serializers.ModelSerializer):
    """Compact sample representation embedded in experiment detail views."""

    class Meta:
        model = Sample
        fields = (
            "accession_code",
            "platform_name",
            "pretty_platform",
            "technology",
            "is_processed",
        )
class ComputationalResultAnnotationSerializer(serializers.ModelSerializer):
    """Read serializer for annotations attached to a computational result."""

    class Meta:
        model = ComputationalResultAnnotation
        fields = ("id", "data", "is_ccdl", "created_at", "last_modified")
class ComputedFileSerializer(serializers.ModelSerializer):
    """Read serializer for a computed file (no download URL exposed)."""

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "is_smashable",
            "is_qc",
            "sha1",
            "s3_bucket",
            "s3_key",
            "created_at",
            "last_modified",
        )
class ComputedFileWithUrlSerializer(serializers.ModelSerializer):
    """Same as ComputedFileSerializer plus the `download_url` field."""

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "is_smashable",
            "is_qc",
            "sha1",
            "s3_bucket",
            "s3_key",
            "download_url",
            "created_at",
            "last_modified",
        )
class ComputationalResultSerializer(serializers.ModelSerializer):
annotations = ComputationalResultAnnotationSerializer(
many=True, source="computationalresultannotation_set"
)
processor = ProcessorSerializer(many=False)
organism_index = OrganismIndexSerializer(many=False)
files = ComputedFileSerializer(many=True, source="computedfile_set")
class Meta:
model = ComputationalResult
fields = (
"id",
"commands",
"processor",
"is_ccdl",
"annotations",
"files",
"organism_index",
"time_start",
"time_end",
"created_at",
"last_modified",
)
class ComputationalResultWithUrlSerializer(ComputationalResultSerializer):
files = ComputedFileWithUrlSerializer(many=True, source="computedfile_set")
class ComputationalResultNoFilesSerializer(serializers.ModelSerializer):
    """ComputationalResult representation that omits the files relation,
    for embedding where the file list would be redundant."""

    annotations = ComputationalResultAnnotationSerializer(
        many=True, source="computationalresultannotation_set"
    )
    processor = ProcessorSerializer(many=False)
    organism_index = OrganismIndexSerializer(many=False)

    class Meta:
        model = ComputationalResult
        fields = (
            "id",
            "commands",
            "processor",
            "is_ccdl",
            "annotations",
            "organism_index",
            "time_start",
            "time_end",
            "created_at",
            "last_modified",
        )
class QNTargetSerializer(serializers.ModelSerializer):
    """Serializer for quantile-normalization target files, with the
    producing computational result embedded."""

    result = ComputationalResultNoFilesSerializer(many=False)

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "is_qn_target",
            "sha1",
            "s3_bucket",
            "s3_key",
            "s3_url",
            "created_at",
            "last_modified",
            "result",
        )
class ComputedFileListSerializer(serializers.ModelSerializer):
    """List serializer for ComputedFile.

    The `download_url` field is serialized only when the view has placed a
    valid token in the serializer context; otherwise it is removed from the
    output entirely.
    """

    result = ComputationalResultNoFilesSerializer(many=False)
    samples = DetailedExperimentSampleSerializer(many=True)
    compendia_organism_name = serializers.CharField(
        source="compendia_organism__name", read_only=True
    )

    def __init__(self, *args, **kwargs):
        super(ComputedFileListSerializer, self).__init__(*args, **kwargs)
        # The token lookup itself happens in the view; here we only check
        # whether the view put one into the context.
        if "context" in kwargs and "token" not in kwargs["context"]:
            self.fields.pop("download_url")

    class Meta:
        model = ComputedFile
        fields = (
            "id",
            "filename",
            "samples",
            "size_in_bytes",
            "is_qn_target",
            "is_smashable",
            "is_qc",
            "is_compendia",
            "quant_sf_only",
            "compendia_version",
            "compendia_organism_name",
            "sha1",
            "s3_bucket",
            "s3_key",
            "s3_url",
            "download_url",
            "created_at",
            "last_modified",
            "result",
        )
        extra_kwargs = {
            "download_url": {
                "help_text": "This will contain an url to download the file. You must send a valid [token](#tag/token) in order to receive this."
            }
        }
class OriginalFileListSerializer(serializers.ModelSerializer):
    """List serializer for OriginalFile, including its job relations."""

    class Meta:
        model = OriginalFile
        # Note: the field list previously contained "samples" twice; the
        # duplicate entry was redundant and has been removed.
        fields = (
            "id",
            "filename",
            "samples",
            "size_in_bytes",
            "sha1",
            "processor_jobs",
            "downloader_jobs",
            "source_url",
            "is_archive",
            "source_filename",
            "has_raw",
            "created_at",
            "last_modified",
        )
##
# Samples
##
class SampleSerializer(serializers.ModelSerializer):
    """Basic Sample representation with its organism embedded."""

    organism = OrganismSerializer(many=False)

    class Meta:
        model = Sample
        fields = (
            "id",
            "title",
            "accession_code",
            "source_database",
            "organism",
            "platform_accession_code",
            "platform_name",
            "pretty_platform",
            "technology",
            "manufacturer",
            "protocol_info",
            "is_processed",
            "created_at",
            "last_modified",
        )
class SampleAnnotationSerializer(serializers.ModelSerializer):
    """Serializer for annotations attached to a Sample."""

    class Meta:
        model = SampleAnnotation
        fields = (
            "data",
            "is_ccdl",
            "created_at",
            "last_modified",
        )
class DetailedSamplesComputationalResultSerializer(serializers.ModelSerializer):
    """Minimal ComputationalResult view (processor + organism index only)
    embedded in detailed sample responses."""

    processor = ProcessorSerializer(many=False)
    organism_index = OrganismIndexSerializer(many=False)

    class Meta:
        model = ComputationalResult
        fields = (
            "id",
            "processor",
            "organism_index",
        )
class DetailedSampleSerializer(serializers.ModelSerializer):
    """Full Sample representation: metadata columns plus nested annotations,
    organism and computational results."""

    annotations = SampleAnnotationSerializer(many=True, source="sampleannotation_set")
    organism = OrganismSerializer(many=False)
    results = DetailedSamplesComputationalResultSerializer(many=True)

    class Meta:
        model = Sample
        fields = (
            "id",
            "title",
            "accession_code",
            "source_database",
            "organism",
            "platform_accession_code",
            "platform_name",
            "pretty_platform",
            "technology",
            "manufacturer",
            "protocol_info",
            "annotations",
            "results",
            "source_archive_url",
            "has_raw",
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
            "is_processed",
            "created_at",
            "last_modified",
            "original_files",
            "computed_files",
        )
##
# Experiments
##
class ExperimentSerializer(serializers.ModelSerializer):
    """Serializer for Experiment list responses.

    Exposes several computed/related fields (platforms, sample metadata,
    technologies) in addition to the model's own columns. Use
    `setup_eager_loading` on the queryset to avoid N+1 queries and to
    provide the count annotations this serializer reads.
    """

    organism_names = serializers.StringRelatedField(many=True, source="organisms", read_only=True)
    platforms = serializers.ReadOnlyField()
    processed_samples = serializers.StringRelatedField(many=True)
    total_samples_count = serializers.IntegerField(read_only=True)
    sample_metadata = serializers.ReadOnlyField(source="get_sample_metadata_fields")
    technologies = serializers.ReadOnlyField(source="get_sample_technologies")
    pretty_platforms = serializers.ReadOnlyField()

    class Meta:
        model = Experiment
        fields = (
            "id",
            "title",
            "description",
            "accession_code",
            "alternate_accession_code",
            "source_database",
            "source_url",
            "platforms",
            "pretty_platforms",
            "processed_samples",
            "has_publication",
            "publication_title",
            "publication_doi",
            "publication_authors",
            "pubmed_id",
            "total_samples_count",
            "organism_names",
            "submitter_institution",
            "created_at",
            "last_modified",
            "source_first_published",
            "source_last_modified",
            "sample_metadata",
            "technologies",
        )

    @staticmethod
    def setup_eager_loading(queryset):
        """ Perform necessary eager loading of data. """
        queryset = queryset.prefetch_related("samples").prefetch_related("organisms")

        # Multiple count annotations over the same multi-valued relation can
        # multiply rows through the joins, so the counts must be distinct.
        # (`unique=True` was passed before, which is not a Count kwarg and
        # was silently ignored; the documented kwarg is `distinct=True`.)
        queryset = queryset.annotate(
            total_samples_count=Count("samples", distinct=True),
            processed_samples_count=Count(
                "samples", filter=Q(samples__is_processed=True), distinct=True
            ),
        )
        return queryset
class ExperimentAnnotationSerializer(serializers.ModelSerializer):
    """Serializer for annotations attached to an Experiment."""

    class Meta:
        model = ExperimentAnnotation
        fields = (
            "data",
            "is_ccdl",
            "created_at",
            "last_modified",
        )
class DetailedExperimentSerializer(serializers.ModelSerializer):
    """Full Experiment representation with nested annotations and samples."""

    annotations = ExperimentAnnotationSerializer(many=True, source="experimentannotation_set")
    samples = DetailedExperimentSampleSerializer(many=True)
    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")
    organism_names = serializers.StringRelatedField(many=True, source="organisms", read_only=True)

    class Meta:
        model = Experiment
        fields = (
            "id",
            "title",
            "description",
            "annotations",
            "samples",
            "protocol_description",
            "accession_code",
            "source_database",
            "source_url",
            "has_publication",
            "publication_title",
            "publication_doi",
            "publication_authors",
            "pubmed_id",
            "source_first_published",
            "source_last_modified",
            "submitter_institution",
            "last_modified",
            "created_at",
            "organism_names",
            "sample_metadata",
            "num_total_samples",
            "num_processed_samples",
            "num_downloadable_samples",
        )
class PlatformSerializer(serializers.ModelSerializer):
    """Exposes just a sample's platform accession code and name."""

    class Meta:
        model = Sample
        fields = (
            "platform_accession_code",
            "platform_name",
        )
class InstitutionSerializer(serializers.ModelSerializer):
    """Exposes just an experiment's submitter institution."""

    class Meta:
        model = Experiment
        fields = ("submitter_institution",)
##
# Files
##
class OriginalFileSerializer(serializers.ModelSerializer):
    """Basic OriginalFile representation without job relations."""

    class Meta:
        model = OriginalFile
        fields = (
            "id",
            "filename",
            "size_in_bytes",
            "sha1",
            "source_url",
            "source_filename",
            "is_downloaded",
            "is_archive",
            "has_raw",
            "created_at",
            "last_modified",
        )
##
# Jobs
##
class SurveyJobSerializer(serializers.ModelSerializer):
    """Serializer for SurveyJob status information."""

    class Meta:
        model = SurveyJob
        fields = (
            "id",
            "source_type",
            "success",
            "start_time",
            "end_time",
            "created_at",
            "last_modified",
        )
class DownloaderJobSerializer(serializers.ModelSerializer):
    """Serializer for DownloaderJob, including scheduling metadata
    (nomad_job_id, retries, worker info) and outcome fields."""

    class Meta:
        model = DownloaderJob
        fields = (
            "id",
            "downloader_task",
            "num_retries",
            "retried",
            "was_recreated",
            "worker_id",
            "worker_version",
            "nomad_job_id",
            "failure_reason",
            "success",
            "original_files",
            "start_time",
            "end_time",
            "created_at",
            "last_modified",
        )
class ProcessorJobSerializer(serializers.ModelSerializer):
    """Serializer for ProcessorJob, including scheduling metadata
    (nomad_job_id, retries, worker info) and outcome fields.

    NOTE(review): `nomad_job_id` is only as useful as the job-dispatch code
    makes it -- some job types (e.g. compendia) have been observed with it
    unset; that is a dispatcher issue, not a serializer one.
    """

    class Meta:
        model = ProcessorJob
        fields = (
            "id",
            "pipeline_applied",
            "num_retries",
            "retried",
            "worker_id",
            "ram_amount",
            "volume_index",
            "worker_version",
            "failure_reason",
            "nomad_job_id",
            "success",
            "original_files",
            "datasets",
            "start_time",
            "end_time",
            "created_at",
            "last_modified",
        )
##
# Datasets
##
def validate_dataset(data):
    """Validate the `data` payload of a Dataset.

    Rules enforced:
      * `data["data"]` must be a dict mapping experiment accession codes to
        lists of sample accession codes.
      * When `start` is truthy there must be at least one experiment.
      * Each experiment entry must be a non-empty list with no duplicates.
      * `["ALL"]` is accepted if the experiment has at least one
        downloadable sample.
      * Every explicitly listed sample must be processed (downloadable).

    Raises serializers.ValidationError when any rule is violated.
    """
    if data.get("data") is None or not isinstance(data["data"], dict):
        raise serializers.ValidationError("`data` must be a dict of lists.")

    if data.get("start") and len(data["data"]) == 0:
        raise serializers.ValidationError("`data` must contain at least one experiment..")

    accessions = []
    for key, value in data["data"].items():
        if not isinstance(value, list):
            raise serializers.ValidationError(
                "`data` must be a dict of lists. Problem with `" + str(key) + "`"
            )

        if len(value) < 1:
            raise serializers.ValidationError(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )

        # set() raises on unhashable members, which the except converts into
        # a validation error. (The duplicate-values error is also re-wrapped
        # by this except -- preserved as-is for message compatibility.)
        try:
            if len(value) != len(set(value)):
                raise serializers.ValidationError("Duplicate values detected in " + str(value))
        except Exception as e:
            raise serializers.ValidationError("Received bad dataset data: " + str(e))

        # If they want "ALL", just make sure that the experiment has at least
        # one downloadable sample.
        if value == ["ALL"]:
            try:
                Experiment.processed_public_objects.get(accession_code=key)
            except Exception:
                raise serializers.ValidationError(
                    "Experiment " + key + " does not have at least one downloadable sample"
                )
        # Otherwise collect the accessions so we can check them in one query.
        else:
            accessions.extend(value)

    if len(accessions) > 0:
        unprocessed_samples = Sample.public_objects.filter(
            accession_code__in=accessions, is_processed=False
        )
        if unprocessed_samples.count() > 0:
            raise serializers.ValidationError(
                "Non-downloadable sample(s) '"
                + ", ".join([s.accession_code for s in unprocessed_samples])
                + "' in dataset"
            )
class CreateDatasetSerializer(serializers.ModelSerializer):
    """Serializer used when first creating a Dataset.

    Only the fields a client may set at creation time are exposed.
    """

    class Meta:
        model = Dataset
        fields = ("id", "data", "email_address", "email_ccdl_ok")

    def validate(self, data):
        """Reject payloads that do not describe a valid dataset."""
        # validate_dataset raises ValidationError itself; nothing to wrap.
        validate_dataset(data)
        return data
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
    """ This serializer contains all of the information about an experiment needed for the download
    page
    """

    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")

    class Meta:
        model = Experiment
        fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
    """Serializer for viewing and updating a Dataset.

    Field visibility is dynamic:
      * `experiments`, `organism_samples` and `worker_version` are only
        serialized when the request carries `?details=true`.
      * `download_url` is only serialized when the view placed a valid
        token in the serializer context.
    """

    # Setting `start` kicks off processing (handled in the view).
    start = serializers.NullBooleanField(required=False)
    experiments = DatasetDetailsExperimentSerializer(
        source="get_experiments", many=True, read_only=True
    )
    organism_samples = serializers.SerializerMethodField(read_only=True)
    worker_version = serializers.SerializerMethodField(read_only=True)

    def __init__(self, *args, **kwargs):
        super(DatasetSerializer, self).__init__(*args, **kwargs)
        if "context" in kwargs:
            if "request" in kwargs["context"]:
                # only include the fields `experiments` and `organism_samples` when the param `?details=true`
                # is provided. This is used on the frontend to render the downloads page
                # thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
                if "details" not in kwargs["context"]["request"].query_params:
                    self.fields.pop("experiments")
                    self.fields.pop("organism_samples")
                    self.fields.pop("worker_version")
            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    class Meta:
        model = Dataset
        fields = (
            "id",
            "data",
            "aggregate_by",
            "scale_by",
            "is_processing",
            "is_processed",
            "is_available",
            "has_email",
            "expires_on",
            "s3_bucket",
            "s3_key",
            "success",
            "failure_reason",
            "created_at",
            "last_modified",
            "start",
            "size_in_bytes",
            "sha1",
            "experiments",
            "organism_samples",
            "download_url",
            "quantile_normalize",
            "quant_sf_only",
            "svd_algorithm",
            "worker_version",
        )
        extra_kwargs = {
            "data": {"required": True,},
            "id": {"read_only": True,},
            "is_processing": {"read_only": True,},
            "is_processed": {"read_only": True,},
            "is_available": {"read_only": True,},
            "expires_on": {"read_only": True,},
            "s3_bucket": {"read_only": True,},
            "s3_key": {"read_only": True,},
            "success": {"read_only": True,},
            "failure_reason": {"read_only": True,},
            "created_at": {"read_only": True,},
            "last_modified": {"read_only": True,},
            "size_in_bytes": {"read_only": True,},
            "sha1": {"read_only": True,},
            "download_url": {"read_only": True,},
            "worker_version": {
                "read_only": True,
                "help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
            },
        }

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        try:
            validate_dataset(data)
        except Exception:
            raise
        return data

    def get_organism_samples(self, obj):
        """
        Groups the sample accession codes inside a dataset by their organisms, eg:
        { HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
        Useful to avoid sending sample information on the downloads page
        """
        samples = (
            obj.get_samples()
            .prefetch_related("organism")
            .values("organism__name", "accession_code")
            .order_by("organism__name", "accession_code")
        )
        result = defaultdict(list)
        for sample in samples:
            result[sample["organism__name"]].append(sample["accession_code"])
        return result

    def get_worker_version(self, obj):
        """Return the worker_version of the most recent processor job for
        this dataset, or None when no jobs exist."""
        processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
            "worker_version", flat=True
        )
        if processor_jobs:
            return processor_jobs[0]
        else:
            return None
class APITokenSerializer(serializers.ModelSerializer):
    """Serializer for API tokens; only `is_activated` is writable."""

    class Meta:
        model = APIToken
        fields = ("id", "is_activated", "terms_and_conditions")
        extra_kwargs = {
            "id": {"read_only": True},
            "is_activated": {"read_only": False},
            "terms_and_conditions": {"read_only": True},
        }
class CompendiumResultSerializer(serializers.ModelSerializer):
    """CompendiumResult representation without a file download URL."""

    primary_organism_name = serializers.StringRelatedField(
        read_only=True, source="primary_organism"
    )
    organism_names = serializers.StringRelatedField(many=True, source="organisms", read_only=True)
    computed_file = ComputedFileSerializer(source="get_computed_file", read_only=True)

    class Meta:
        model = CompendiumResult
        fields = (
            "id",
            "primary_organism_name",
            "organism_names",
            "svd_algorithm",
            "quant_sf_only",
            "compendium_version",
            "computed_file",
        )
        read_only_fields = fields
class CompendiumResultWithUrlSerializer(serializers.ModelSerializer):
    """CompendiumResult representation whose computed file carries
    `download_url` (token-authenticated callers)."""

    primary_organism_name = serializers.StringRelatedField(
        read_only=True, source="primary_organism"
    )
    organism_names = serializers.StringRelatedField(many=True, source="organisms", read_only=True)
    computed_file = ComputedFileWithUrlSerializer(source="get_computed_file", read_only=True)

    class Meta:
        model = CompendiumResult
        fields = (
            "id",
            "primary_organism_name",
            "organism_names",
            "svd_algorithm",
            "quant_sf_only",
            "compendium_version",
            "computed_file",
        )
        read_only_fields = fields
##
# ElasticSearch Document Serializers
##
class ExperimentDocumentSerializer(DocumentSerializer):
    """Serializer for the Experiment document (ElasticSearch index)."""

    class Meta(object):
        """Meta options."""

        document = ExperimentDocument
        fields = (
            "id",
            "title",
            "publication_title",
            "description",
            "technology",
            "accession_code",
            "alternate_accession_code",
            "submitter_institution",
            "has_publication",
            "publication_doi",
            "publication_authors",
            "sample_metadata_fields",
            "platform_names",
            "platform_accession_codes",
            "organism_names",
            "pubmed_id",
            "num_total_samples",
            "num_processed_samples",
            "num_downloadable_samples",
            "source_first_published",
        )
        read_only_fields = fields
<|code_end|>
|
Make the dataset API simpler to use
### Context
Pairing with @kurtwheeler on the email validation code for the dataset endpoint this morning
### Problem or idea
@kurtwheeler pointed out that there is no good reason why we have to start the dataset using a PUT request, other than that being how we do it on the frontend. If everything is valid immediately, you should be able to create and dispatch a job in your initial POST request.
### Solution or next step
Refactor the dataset creation code so that it does not use a separate view for creating the dataset, and just validate POST requests the same way we validate PUT requests.
| api/data_refinery_api/urls.py
<|code_start|>from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import include, path
from django.views.generic import RedirectView
from rest_framework import permissions
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from data_refinery_api.views import (
AboutStats,
APITokenView,
CompendiumResultDetailView,
CompendiumResultListView,
ComputationalResultDetailView,
ComputationalResultListView,
ComputedFileDetailView,
ComputedFileListView,
CreateAPITokenView,
CreateDatasetView,
DatasetView,
DownloaderJobDetailView,
DownloaderJobListView,
ExperimentDetailView,
ExperimentDocumentView,
ExperimentListView,
FailedDownloaderJobStats,
FailedProcessorJobStats,
InstitutionListView,
OrganismDetailView,
OrganismListView,
OriginalFileDetailView,
OriginalFileListView,
PlatformListView,
ProcessorDetailView,
ProcessorJobDetailView,
ProcessorJobListView,
ProcessorListView,
QNTargetsAvailable,
QNTargetsDetailView,
SampleDetailView,
SampleListView,
Stats,
SurveyJobDetailView,
SurveyJobListView,
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
# error handlers
def handle404error(request, exception):
    """Return a JSON 404 response pointing the caller at the API docs."""
    docs_url = "https://api.refine.bio/"
    # Unversioned paths get a more specific hint than a plain not-found.
    if match(r"^/v[1-9]/.*", request.path):
        message = "The requested resource was not found on this server."
    else:
        message = "refine.bio API resources are only available through versioned requests."
    return JsonResponse({"message": message, "docs": docs_url, "status_code": 404,}, status=404)
def handle500error(request):
    """Return a generic JSON 500 error response."""
    return JsonResponse(
        # Fixed typo in the user-facing message: "occured" -> "occurred".
        {"message": "A server error occurred. This has been reported.", "status_code": 500,},
        status=500,
    )
# This provides _public_ access to the /admin interface!
# Enabling this by setting DEBUG to true this will allow unauthenticated access to the admin interface.
# Very useful for debugging (since we have no User accounts), but very dangerous for prod!
class AccessUser:
    """Stub user object that grants every permission.

    Used below to make the /admin interface reachable without
    authentication when DEBUG is enabled.
    """

    def has_module_perms(self, *args, **kwargs):
        return True

    def has_perm(self, *args, **kwargs):
        return True

    def __getattr__(self, name, *args, **kwargs):
        # Any other attribute lookup also reports "allowed".
        return True
if settings.DEBUG:
    # Attach an all-permissive stub user to every admin request so the admin
    # is usable without authentication in local development.
    admin.site.has_permission = lambda r: setattr(r, "user", AccessUser()) or True
# drf-yasg schema view backing both the swagger-ui and ReDoc API docs pages.
schema_view = get_schema_view(
    openapi.Info(
        title="Refine.bio API",
        default_version="v1",
        description="""
refine.bio is a multi-organism collection of genome-wide transcriptome or gene expression data that has been obtained from publicly available repositories and uniformly processed and normalized. refine.bio allows biologists, clinicians, and machine learning researchers to search for experiments from different source repositories all in one place and build custom data sets for their questions of interest.
The swagger-ui view can be found [here](http://api.refine.bio/swagger/).
The ReDoc view can be found [here](http://api.refine.bio/).
Additional documentation can be found at [docs.refine.bio](http://docs.refine.bio/en/latest/).
### Questions/Feedback?
If you have a question or comment, please [file an issue on GitHub](https://github.com/AlexsLemonade/refinebio/issues) or send us an email at [ccdl@alexslemonade.org](mailto:ccdl@alexslemonade.org).
""",
        terms_of_service="https://www.refine.bio/terms",
        contact=openapi.Contact(email="ccdl@alexslemonade.org"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
# Routing table. Every API resource is served under a version prefix
# (currently only "v1"); unversioned root/swagger URLs redirect to v1.
urlpatterns = [
    url(
        r"^(?P<version>v1)/",
        include(
            [
                # Primary search and filter interface
                url(r"^search/$", ExperimentDocumentView.as_view({"get": "list"}), name="search"),
                url(r"^experiments/$", ExperimentListView.as_view(), name="experiments"),
                url(
                    r"^experiments/(?P<accession_code>.+)/$",
                    ExperimentDetailView.as_view(),
                    name="experiments_detail",
                ),
                url(r"^samples/$", SampleListView.as_view(), name="samples"),
                url(
                    r"^samples/(?P<accession_code>.+)/$",
                    SampleDetailView.as_view(),
                    name="samples_detail",
                ),
                url(r"^organisms/$", OrganismListView.as_view(), name="organisms"),
                url(
                    r"^organisms/(?P<name>.+)/$",
                    OrganismDetailView.as_view(),
                    name="organisms_detail",
                ),
                url(r"^platforms/$", PlatformListView.as_view(), name="platforms"),
                url(r"^institutions/$", InstitutionListView.as_view(), name="institutions"),
                url(r"^processors/$", ProcessorListView.as_view(), name="processors"),
                url(
                    r"^processors/(?P<id>[0-9a-f-]+)/$",
                    ProcessorDetailView.as_view(),
                    name="processors_detail",
                ),
                # Deliverables
                url(r"^dataset/$", CreateDatasetView.as_view(), name="create_dataset"),
                url(r"^dataset/(?P<id>[0-9a-f-]+)/$", DatasetView.as_view(), name="dataset"),
                url(r"^token/$", CreateAPITokenView.as_view(), name="token"),
                url(r"^token/(?P<id>[0-9a-f-]+)/$", APITokenView.as_view(), name="token_id"),
                # Jobs
                url(r"^jobs/survey/$", SurveyJobListView.as_view(), name="survey_jobs"),
                url(
                    r"^jobs/survey/(?P<id>[0-9a-f-]+)/$",
                    SurveyJobDetailView.as_view(),
                    name="survey_jobs_detail",
                ),
                url(r"^jobs/downloader/$", DownloaderJobListView.as_view(), name="downloader_jobs"),
                url(
                    r"^jobs/downloader/(?P<id>[0-9a-f-]+)/$",
                    DownloaderJobDetailView.as_view(),
                    name="downloader_jobs_detail",
                ),
                url(r"^jobs/processor/$", ProcessorJobListView.as_view(), name="processor_jobs"),
                url(
                    r"^jobs/processor/(?P<id>[0-9a-f-]+)/$",
                    ProcessorJobDetailView.as_view(),
                    name="processor_jobs_detail",
                ),
                # Dashboard Driver
                url(r"^stats/$", Stats.as_view(), name="stats"),
                url(
                    r"^stats/failures/downloader$",
                    FailedDownloaderJobStats.as_view(),
                    name="stats_failed_downloader",
                ),
                url(
                    r"^stats/failures/processor$",
                    FailedProcessorJobStats.as_view(),
                    name="stats_failed_processor",
                ),
                url(r"^stats-about/$", AboutStats.as_view(), name="stats_about"),
                # Transcriptome Indices
                path(
                    "transcriptome_indices/",
                    include(
                        [
                            path(
                                "",
                                TranscriptomeIndexListView.as_view(),
                                name="transcriptome_indices",
                            ),
                            path(
                                "<int:id>",
                                TranscriptomeIndexDetailView.as_view(),
                                name="transcriptome_indices_read",
                            ),
                        ]
                    ),
                ),
                # QN Targets
                url(r"^qn_targets/$", QNTargetsAvailable.as_view(), name="qn_targets_available"),
                url(
                    r"^qn_targets/(?P<organism_name>.+)$",
                    QNTargetsDetailView.as_view(),
                    name="qn_targets",
                ),
                # Computed Files
                url(r"^computed_files/$", ComputedFileListView.as_view(), name="computed_files"),
                url(
                    r"^computed_files/(?P<id>[0-9a-f-]+)/$",
                    ComputedFileDetailView.as_view(),
                    name="computed_files_detail",
                ),
                url(r"^original_files/$", OriginalFileListView.as_view(), name="original_files"),
                url(
                    r"^original_files/(?P<id>[0-9a-f-]+)/$",
                    OriginalFileDetailView.as_view(),
                    name="original_files_detail",
                ),
                url(
                    r"^computational_results/$",
                    ComputationalResultListView.as_view(),
                    name="results",
                ),
                url(
                    r"^computational_results/(?P<id>[0-9a-f-]+)/$",
                    ComputationalResultDetailView.as_view(),
                    name="results_detail",
                ),
                # Compendia
                url(r"^compendia/$", CompendiumResultListView.as_view(), name="compendium_results"),
                url(
                    r"^compendia/(?P<id>[0-9]+)/$",
                    CompendiumResultDetailView.as_view(),
                    name="compendium_result",
                ),
                # v1 api docs
                url(
                    r"^swagger/$",
                    schema_view.with_ui("swagger", cache_timeout=0),
                    name="schema_swagger_ui",
                ),
                url(r"^$", schema_view.with_ui("redoc", cache_timeout=0), name="schema_redoc"),
            ]
        ),
    ),
    # Admin
    url(r"^admin/", admin.site.urls),
    # Redirect root urls to latest version api docs
    url(r"^swagger/$", RedirectView.as_view(url="/v1/swagger")),
    url(r"^$", RedirectView.as_view(url="/v1")),
]
# Register the custom JSON error handlers defined at the top of this module.
handler404 = handle404error
handler500 = handle500error
<|code_end|>
api/data_refinery_api/views/__init__.py
<|code_start|>from data_refinery_api.views.api_token import APITokenView, CreateAPITokenView
from data_refinery_api.views.compendium_result import (
CompendiumResultDetailView,
CompendiumResultListView,
)
from data_refinery_api.views.computational_result import (
ComputationalResultDetailView,
ComputationalResultListView,
)
from data_refinery_api.views.computed_file import ComputedFileDetailView, ComputedFileListView
from data_refinery_api.views.dataset import CreateDatasetView, DatasetView
from data_refinery_api.views.experiment import ExperimentDetailView, ExperimentListView
from data_refinery_api.views.experiment_document import ExperimentDocumentView
from data_refinery_api.views.institution import InstitutionListView
from data_refinery_api.views.jobs import (
DownloaderJobDetailView,
DownloaderJobListView,
ProcessorJobDetailView,
ProcessorJobListView,
SurveyJobDetailView,
SurveyJobListView,
)
from data_refinery_api.views.organism import OrganismDetailView, OrganismListView
from data_refinery_api.views.original_file import OriginalFileDetailView, OriginalFileListView
from data_refinery_api.views.platform import PlatformListView
from data_refinery_api.views.processor import ProcessorDetailView, ProcessorListView
from data_refinery_api.views.qn_targets import QNTargetsAvailable, QNTargetsDetailView
from data_refinery_api.views.sample import SampleDetailView, SampleListView
from data_refinery_api.views.stats import (
AboutStats,
FailedDownloaderJobStats,
FailedProcessorJobStats,
Stats,
)
from data_refinery_api.views.transcriptome_index import (
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
<|code_end|>
api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains CreateDatasetView and DatasetView
##
from collections import defaultdict
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from rest_framework import filters, generics, serializers
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_common.models import (
APIToken,
Dataset,
DatasetAnnotation,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
)
def get_client_ip(request):
    """Best-effort client IP for a request.

    Prefers the first hop of the X-Forwarded-For header (set by proxies);
    falls back to REMOTE_ADDR, or "" when neither is present.
    """
    forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
    if forwarded_for:
        return forwarded_for.split(",")[0]
    return request.META.get("REMOTE_ADDR", "")
def validate_dataset(data):
    """Validate the `data` payload of a Dataset.

    Rules enforced:
      * `data["data"]` must be a dict mapping experiment accession codes to
        lists of sample accession codes.
      * When `start` is truthy there must be at least one experiment.
      * Each experiment entry must be a non-empty list with no duplicates.
      * `["ALL"]` is accepted if the experiment has at least one
        downloadable sample.
      * Every explicitly listed sample must be processed (downloadable).

    Raises serializers.ValidationError when any rule is violated.
    """
    if data.get("data") is None or not isinstance(data["data"], dict):
        raise serializers.ValidationError("`data` must be a dict of lists.")

    if data.get("start") and len(data["data"]) == 0:
        raise serializers.ValidationError("`data` must contain at least one experiment..")

    accessions = []
    for key, value in data["data"].items():
        if not isinstance(value, list):
            raise serializers.ValidationError(
                "`data` must be a dict of lists. Problem with `" + str(key) + "`"
            )

        if len(value) < 1:
            raise serializers.ValidationError(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )

        # set() raises on unhashable members, which the except converts into
        # a validation error. (The duplicate-values error is also re-wrapped
        # by this except -- preserved as-is for message compatibility.)
        try:
            if len(value) != len(set(value)):
                raise serializers.ValidationError("Duplicate values detected in " + str(value))
        except Exception as e:
            raise serializers.ValidationError("Received bad dataset data: " + str(e))

        # If they want "ALL", just make sure that the experiment has at least
        # one downloadable sample.
        if value == ["ALL"]:
            try:
                Experiment.processed_public_objects.get(accession_code=key)
            except Exception:
                raise serializers.ValidationError(
                    "Experiment " + key + " does not have at least one downloadable sample"
                )
        # Otherwise collect the accessions so we can check them in one query.
        else:
            accessions.extend(value)

    if len(accessions) > 0:
        unprocessed_samples = Sample.public_objects.filter(
            accession_code__in=accessions, is_processed=False
        )
        if unprocessed_samples.count() > 0:
            raise serializers.ValidationError(
                "Non-downloadable sample(s) '"
                + ", ".join([s.accession_code for s in unprocessed_samples])
                + "' in dataset"
            )
class CreateDatasetSerializer(serializers.ModelSerializer):
    """Serializer used when first creating a Dataset.

    Only the fields a client may set at creation time are exposed.
    """

    class Meta:
        model = Dataset
        fields = ("id", "data", "email_address", "email_ccdl_ok")

    def validate(self, data):
        """Reject payloads that do not describe a valid dataset."""
        # validate_dataset raises ValidationError itself; nothing to wrap.
        validate_dataset(data)
        return data
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
    """ This serializer contains all of the information about an experiment needed for the download
    page
    """

    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")

    class Meta:
        model = Experiment
        fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
    """Serializer for viewing and updating a Dataset.

    Field visibility is dynamic:
      * `experiments`, `organism_samples` and `worker_version` are only
        serialized when the request carries `?details=true`.
      * `download_url` is only serialized when the view placed a valid
        token in the serializer context.
    """

    # Setting `start` kicks off processing (handled in the view).
    start = serializers.NullBooleanField(required=False)
    experiments = DatasetDetailsExperimentSerializer(
        source="get_experiments", many=True, read_only=True
    )
    organism_samples = serializers.SerializerMethodField(read_only=True)
    worker_version = serializers.SerializerMethodField(read_only=True)

    def __init__(self, *args, **kwargs):
        super(DatasetSerializer, self).__init__(*args, **kwargs)
        if "context" in kwargs:
            if "request" in kwargs["context"]:
                # only include the fields `experiments` and `organism_samples` when the param `?details=true`
                # is provided. This is used on the frontend to render the downloads page
                # thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
                if "details" not in kwargs["context"]["request"].query_params:
                    self.fields.pop("experiments")
                    self.fields.pop("organism_samples")
                    self.fields.pop("worker_version")
            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    class Meta:
        model = Dataset
        fields = (
            "id",
            "data",
            "aggregate_by",
            "scale_by",
            "is_processing",
            "is_processed",
            "is_available",
            "has_email",
            "expires_on",
            "s3_bucket",
            "s3_key",
            "success",
            "failure_reason",
            "created_at",
            "last_modified",
            "start",
            "size_in_bytes",
            "sha1",
            "experiments",
            "organism_samples",
            "download_url",
            "quantile_normalize",
            "quant_sf_only",
            "svd_algorithm",
            "worker_version",
        )
        extra_kwargs = {
            "data": {"required": True,},
            "id": {"read_only": True,},
            "is_processing": {"read_only": True,},
            "is_processed": {"read_only": True,},
            "is_available": {"read_only": True,},
            "expires_on": {"read_only": True,},
            "s3_bucket": {"read_only": True,},
            "s3_key": {"read_only": True,},
            "success": {"read_only": True,},
            "failure_reason": {"read_only": True,},
            "created_at": {"read_only": True,},
            "last_modified": {"read_only": True,},
            "size_in_bytes": {"read_only": True,},
            "sha1": {"read_only": True,},
            "download_url": {"read_only": True,},
            "worker_version": {
                "read_only": True,
                "help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
            },
        }

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        try:
            validate_dataset(data)
        except Exception:
            raise
        return data

    def get_organism_samples(self, obj):
        """
        Groups the sample accession codes inside a dataset by their organisms, eg:
        { HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
        Useful to avoid sending sample information on the downloads page
        """
        samples = (
            obj.get_samples()
            .prefetch_related("organism")
            .values("organism__name", "accession_code")
            .order_by("organism__name", "accession_code")
        )
        result = defaultdict(list)
        for sample in samples:
            result[sample["organism__name"]].append(sample["accession_code"])
        return result

    def get_worker_version(self, obj):
        """Return the worker_version of the most recent processor job for
        this dataset, or None when no jobs exist."""
        processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
            "worker_version", flat=True
        )
        if processor_jobs:
            return processor_jobs[0]
        else:
            return None
class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Datasets.

    Payload validation is delegated to CreateDatasetSerializer.validate.
    """

    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        operation_description="View a single Dataset.",
        manual_parameters=[
            openapi.Parameter(
                name="details",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
            )
        ],
    ),
)
@method_decorator(
    name="patch", decorator=swagger_auto_schema(auto_schema=None)
)  # partial updates not supported
@method_decorator(
    name="put",
    decorator=swagger_auto_schema(
        operation_description="""
Modify an existing Dataset.

In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.

```py
import requests
import json

params = json.dumps({
    'data': data,
    'aggregate_by': 'EXPERIMENT',
    'start': True,
    'email_address': 'refinebio@gmail.com'
})
headers = {
    'Content-Type': 'application/json',
    'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
    ),
)
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. """

    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = "id"

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the caller's activated APIToken (if the API-KEY header names a
        valid one) so the serializer can expose `download_url`.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, "token": token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that.
        """
        # Snapshot the pre-update state: once a dataset is processing we must
        # preserve its original `data` and `aggregate_by`.
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        qn_organisms = Organism.get_objects_with_qn_targets()

        # We convert 'ALL' into the actual accession codes given
        for key in new_data["data"].keys():
            accessions = new_data["data"][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(
                    experiment.samples.filter(
                        is_processed=True, organism__in=qn_organisms
                    ).values_list("accession_code", flat=True)
                )
                new_data["data"][key] = sample_codes

        if old_object.is_processed:
            raise serializers.ValidationError(
                "You may not update Datasets which have already been processed"
            )

        if new_data.get("start"):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get("token_id", None)
            if not token_id:
                token_id = self.request.META.get("HTTP_API_KEY", None)
            try:
                APIToken.objects.get(id=token_id, is_activated=True)
            # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            except Exception:
                raise serializers.ValidationError("You must provide an active API token ID")

            supplied_email_address = self.request.data.get("email_address", None)
            if supplied_email_address is None:
                raise serializers.ValidationError("You must provide an email address.")

            email_ccdl_ok = self.request.data.get("email_ccdl_ok", False)

            if not already_processing:
                # Create and dispatch the new job.
                # NOTE(review): the job's nomad_job_id is presumably filled in
                # by send_job() at dispatch time, not here — confirm; jobs
                # saved but never dispatched keep a NULL nomad_job_id.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                # Persist the incoming changes before dispatching, including
                # email fields supplied outside the serializer's fields.
                obj = serializer.save()
                if obj.email_address != supplied_email_address:
                    obj.email_address = supplied_email_address
                    obj.save()
                if email_ccdl_ok:
                    obj.email_ccdl_ok = email_ccdl_ok
                    obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get("no_send_job", False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException(
                        "Unable to queue download job. Something has gone"
                        " wrong and we have been notified about it."
                    )

                serializer.validated_data["is_processing"] = True
                obj = serializer.save()

                # create a new dataset annotation with the information of this request
                annotation = DatasetAnnotation()
                annotation.dataset = old_object
                annotation.data = {
                    "start": True,
                    "ip": get_client_ip(self.request),
                    "user_agent": self.request.META.get("HTTP_USER_AGENT", None),
                }
                annotation.save()

                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data["data"] = old_data
            serializer.validated_data["aggregate_by"] = old_aggregate

        serializer.save()
<|code_end|>
| api/data_refinery_api/urls.py
<|code_start|>from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import include, path
from django.views.generic import RedirectView
from rest_framework import permissions
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from data_refinery_api.views import (
AboutStats,
APITokenView,
CompendiumResultDetailView,
CompendiumResultListView,
ComputationalResultDetailView,
ComputationalResultListView,
ComputedFileDetailView,
ComputedFileListView,
CreateAPITokenView,
DatasetView,
DownloaderJobDetailView,
DownloaderJobListView,
ExperimentDetailView,
ExperimentDocumentView,
ExperimentListView,
FailedDownloaderJobStats,
FailedProcessorJobStats,
InstitutionListView,
OrganismDetailView,
OrganismListView,
OriginalFileDetailView,
OriginalFileListView,
PlatformListView,
ProcessorDetailView,
ProcessorJobDetailView,
ProcessorJobListView,
ProcessorListView,
QNTargetsAvailable,
QNTargetsDetailView,
SampleDetailView,
SampleListView,
Stats,
SurveyJobDetailView,
SurveyJobListView,
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
# error handlers
def handle404error(request, exception):
    """Return a JSON 404 response instead of Django's default HTML error page.

    Unversioned paths (anything not under /v<N>/) get a more specific hint,
    since all API resources require a version prefix.
    """
    message = "The requested resource was not found on this server."
    url = "https://api.refine.bio/"

    # check to see if the 404ed request contained a version
    if not match(r"^/v[1-9]/.*", request.path):
        message = "refine.bio API resources are only available through versioned requests."

    return JsonResponse({"message": message, "docs": url, "status_code": 404,}, status=404)
def handle500error(request):
    """Return a JSON 500 response instead of Django's default HTML error page."""
    # Fixed user-facing typo: "occured" -> "occurred".
    return JsonResponse(
        {"message": "A server error occurred. This has been reported.", "status_code": 500,},
        status=500,
    )
# This provides _public_ access to the /admin interface!
# Enabling this by setting DEBUG to true this will allow unauthenticated access to the admin interface.
# Very useful for debugging (since we have no User accounts), but very dangerous for prod!
class AccessUser:
    # Any attribute, permission, or module-permission lookup on this object
    # returns True, so the admin treats every request as fully privileged.
    has_module_perms = has_perm = __getattr__ = lambda s, *a, **kw: True


if settings.DEBUG:
    # Replace admin's permission check: stamp a fake all-powerful user onto
    # the request and always allow access. Only active when DEBUG is True.
    admin.site.has_permission = lambda r: setattr(r, "user", AccessUser()) or True
schema_view = get_schema_view(
openapi.Info(
title="Refine.bio API",
default_version="v1",
description="""
refine.bio is a multi-organism collection of genome-wide transcriptome or gene expression data that has been obtained from publicly available repositories and uniformly processed and normalized. refine.bio allows biologists, clinicians, and machine learning researchers to search for experiments from different source repositories all in one place and build custom data sets for their questions of interest.
The swagger-ui view can be found [here](http://api.refine.bio/swagger/).
The ReDoc view can be found [here](http://api.refine.bio/).
Additional documentation can be found at [docs.refine.bio](http://docs.refine.bio/en/latest/).
### Questions/Feedback?
If you have a question or comment, please [file an issue on GitHub](https://github.com/AlexsLemonade/refinebio/issues) or send us an email at [ccdl@alexslemonade.org](mailto:ccdl@alexslemonade.org).
""",
terms_of_service="https://www.refine.bio/terms",
contact=openapi.Contact(email="ccdl@alexslemonade.org"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
# All API routes live under a version prefix (currently only v1); root-level
# URLs just redirect into the latest version's docs.
urlpatterns = [
    url(
        r"^(?P<version>v1)/",
        include(
            [
                # Primary search and filter interface
                url(r"^search/$", ExperimentDocumentView.as_view({"get": "list"}), name="search"),
                url(r"^experiments/$", ExperimentListView.as_view(), name="experiments"),
                url(
                    r"^experiments/(?P<accession_code>.+)/$",
                    ExperimentDetailView.as_view(),
                    name="experiments_detail",
                ),
                url(r"^samples/$", SampleListView.as_view(), name="samples"),
                url(
                    r"^samples/(?P<accession_code>.+)/$",
                    SampleDetailView.as_view(),
                    name="samples_detail",
                ),
                url(r"^organisms/$", OrganismListView.as_view(), name="organisms"),
                url(
                    r"^organisms/(?P<name>.+)/$",
                    OrganismDetailView.as_view(),
                    name="organisms_detail",
                ),
                url(r"^platforms/$", PlatformListView.as_view(), name="platforms"),
                url(r"^institutions/$", InstitutionListView.as_view(), name="institutions"),
                url(r"^processors/$", ProcessorListView.as_view(), name="processors"),
                url(
                    r"^processors/(?P<id>[0-9a-f-]+)/$",
                    ProcessorDetailView.as_view(),
                    name="processors_detail",
                ),
                # Deliverables
                url(r"^dataset/$", DatasetView.as_view({"post": "create"}), name="create_dataset"),
                url(
                    r"^dataset/(?P<id>[0-9a-f-]+)/$",
                    DatasetView.as_view({"get": "retrieve", "put": "update"}),
                    name="dataset",
                ),
                url(r"^token/$", CreateAPITokenView.as_view(), name="token"),
                url(r"^token/(?P<id>[0-9a-f-]+)/$", APITokenView.as_view(), name="token_id"),
                # Jobs
                url(r"^jobs/survey/$", SurveyJobListView.as_view(), name="survey_jobs"),
                url(
                    r"^jobs/survey/(?P<id>[0-9a-f-]+)/$",
                    SurveyJobDetailView.as_view(),
                    name="survey_jobs_detail",
                ),
                url(r"^jobs/downloader/$", DownloaderJobListView.as_view(), name="downloader_jobs"),
                url(
                    r"^jobs/downloader/(?P<id>[0-9a-f-]+)/$",
                    DownloaderJobDetailView.as_view(),
                    name="downloader_jobs_detail",
                ),
                url(r"^jobs/processor/$", ProcessorJobListView.as_view(), name="processor_jobs"),
                url(
                    r"^jobs/processor/(?P<id>[0-9a-f-]+)/$",
                    ProcessorJobDetailView.as_view(),
                    name="processor_jobs_detail",
                ),
                # Dashboard Driver
                url(r"^stats/$", Stats.as_view(), name="stats"),
                url(
                    r"^stats/failures/downloader$",
                    FailedDownloaderJobStats.as_view(),
                    name="stats_failed_downloader",
                ),
                url(
                    r"^stats/failures/processor$",
                    FailedProcessorJobStats.as_view(),
                    name="stats_failed_processor",
                ),
                url(r"^stats-about/$", AboutStats.as_view(), name="stats_about"),
                # Transcriptome Indices
                path(
                    "transcriptome_indices/",
                    include(
                        [
                            path(
                                "",
                                TranscriptomeIndexListView.as_view(),
                                name="transcriptome_indices",
                            ),
                            path(
                                "<int:id>",
                                TranscriptomeIndexDetailView.as_view(),
                                name="transcriptome_indices_read",
                            ),
                        ]
                    ),
                ),
                # QN Targets
                url(r"^qn_targets/$", QNTargetsAvailable.as_view(), name="qn_targets_available"),
                url(
                    r"^qn_targets/(?P<organism_name>.+)$",
                    QNTargetsDetailView.as_view(),
                    name="qn_targets",
                ),
                # Computed Files
                url(r"^computed_files/$", ComputedFileListView.as_view(), name="computed_files"),
                url(
                    r"^computed_files/(?P<id>[0-9a-f-]+)/$",
                    ComputedFileDetailView.as_view(),
                    name="computed_files_detail",
                ),
                url(r"^original_files/$", OriginalFileListView.as_view(), name="original_files"),
                url(
                    r"^original_files/(?P<id>[0-9a-f-]+)/$",
                    OriginalFileDetailView.as_view(),
                    name="original_files_detail",
                ),
                url(
                    r"^computational_results/$",
                    ComputationalResultListView.as_view(),
                    name="results",
                ),
                url(
                    r"^computational_results/(?P<id>[0-9a-f-]+)/$",
                    ComputationalResultDetailView.as_view(),
                    name="results_detail",
                ),
                # Compendia
                url(r"^compendia/$", CompendiumResultListView.as_view(), name="compendium_results"),
                url(
                    r"^compendia/(?P<id>[0-9]+)/$",
                    CompendiumResultDetailView.as_view(),
                    name="compendium_result",
                ),
                # v1 api docs
                url(
                    r"^swagger/$",
                    schema_view.with_ui("swagger", cache_timeout=0),
                    name="schema_swagger_ui",
                ),
                url(r"^$", schema_view.with_ui("redoc", cache_timeout=0), name="schema_redoc"),
            ]
        ),
    ),
    # Admin
    url(r"^admin/", admin.site.urls),
    # Redirect root urls to latest version api docs
    url(r"^swagger/$", RedirectView.as_view(url="/v1/swagger")),
    url(r"^$", RedirectView.as_view(url="/v1")),
]
# handle errors
# Django looks these module-level names up on the ROOT_URLCONF module and
# uses them to render custom JSON 404/500 responses (see the handlers above).
handler404 = handle404error
handler500 = handle500error
<|code_end|>
api/data_refinery_api/views/__init__.py
<|code_start|>from data_refinery_api.views.api_token import APITokenView, CreateAPITokenView
from data_refinery_api.views.compendium_result import (
CompendiumResultDetailView,
CompendiumResultListView,
)
from data_refinery_api.views.computational_result import (
ComputationalResultDetailView,
ComputationalResultListView,
)
from data_refinery_api.views.computed_file import ComputedFileDetailView, ComputedFileListView
from data_refinery_api.views.dataset import DatasetView
from data_refinery_api.views.experiment import ExperimentDetailView, ExperimentListView
from data_refinery_api.views.experiment_document import ExperimentDocumentView
from data_refinery_api.views.institution import InstitutionListView
from data_refinery_api.views.jobs import (
DownloaderJobDetailView,
DownloaderJobListView,
ProcessorJobDetailView,
ProcessorJobListView,
SurveyJobDetailView,
SurveyJobListView,
)
from data_refinery_api.views.organism import OrganismDetailView, OrganismListView
from data_refinery_api.views.original_file import OriginalFileDetailView, OriginalFileListView
from data_refinery_api.views.platform import PlatformListView
from data_refinery_api.views.processor import ProcessorDetailView, ProcessorListView
from data_refinery_api.views.qn_targets import QNTargetsAvailable, QNTargetsDetailView
from data_refinery_api.views.sample import SampleDetailView, SampleListView
from data_refinery_api.views.stats import (
AboutStats,
FailedDownloaderJobStats,
FailedProcessorJobStats,
Stats,
)
from data_refinery_api.views.transcriptome_index import (
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
<|code_end|>
api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains DatasetView
##
from collections import defaultdict

from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator

from rest_framework import filters, generics, mixins, serializers, viewsets
from rest_framework.exceptions import APIException

from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema

from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
    APIToken,
    Dataset,
    DatasetAnnotation,
    Experiment,
    Organism,
    ProcessorJob,
    ProcessorJobDatasetAssociation,
    Sample,
)
def get_client_ip(request):
    """Best-effort client IP: first hop of X-Forwarded-For, else REMOTE_ADDR."""
    forwarded = request.META.get("HTTP_X_FORWARDED_FOR")
    if forwarded:
        return forwarded.split(",")[0]
    return request.META.get("REMOTE_ADDR", "")
def validate_dataset(data):
    """
    Dataset validation. Each experiment should always have at least one
    sample, all samples should be downloadable, and when starting the smasher
    there should be at least one experiment.

    Raises serializers.ValidationError on the first problem found. The
    non-downloadable-samples error carries a dict payload so API clients can
    read the offending accession codes without parsing an error string.
    """
    if data.get("data") is None or type(data["data"]) != dict:
        raise serializers.ValidationError("`data` must be a dict of lists.")

    if data.get("start") and len(data["data"]) == 0:
        raise serializers.ValidationError("`data` must contain at least one experiment.")

    accessions = []
    for key, value in data["data"].items():
        if type(value) != list:
            raise serializers.ValidationError(
                "`data` must be a dict of lists. Problem with `" + str(key) + "`"
            )

        if len(value) < 1:
            raise serializers.ValidationError(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )

        try:
            has_duplicates = len(value) != len(set(value))
        except TypeError as e:
            # set() fails on unhashable entries (e.g. nested lists). The
            # previous `except Exception` also swallowed the duplicate-values
            # ValidationError raised inside its own try block and re-wrapped
            # it as "Received bad dataset data".
            raise serializers.ValidationError("Received bad dataset data: " + str(e))
        if has_duplicates:
            raise serializers.ValidationError("Duplicate values detected in " + str(value))

        # If they want "ALL", just make sure that the experiment has at least one downloadable sample
        if value == ["ALL"]:
            try:
                experiment = Experiment.processed_public_objects.get(accession_code=key)
            except Exception:
                raise serializers.ValidationError(
                    "Experiment " + key + " does not have at least one downloadable sample"
                )
        # Otherwise, we will check that all the samples they requested are downloadable
        else:
            accessions.extend(value)

    if len(accessions) > 0:
        unprocessed_samples = Sample.public_objects.filter(
            accession_code__in=accessions, is_processed=False
        )
        if unprocessed_samples.count() > 0:
            # Structured payload so clients can read the accession codes
            # directly instead of parsing them out of an error string.
            raise serializers.ValidationError(
                {
                    "message": "Non-downloadable sample(s) in dataset",
                    "details": [s.accession_code for s in unprocessed_samples],
                }
            )
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
    """ This serializer contains all of the information about an experiment needed for the download
    page
    """

    # Read-only alias: exposes Experiment.sample_metadata_fields as `sample_metadata`.
    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")

    class Meta:
        model = Experiment
        fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
    """Read/write serializer for Dataset, used by DatasetView.

    `experiments`, `organism_samples` and `worker_version` are only included
    when the request has `?details=true`; `download_url` is only included
    when a valid API token was placed in the serializer context by the view.
    """

    start = serializers.NullBooleanField(required=False)
    experiments = DatasetDetailsExperimentSerializer(
        source="get_experiments", many=True, read_only=True
    )
    organism_samples = serializers.SerializerMethodField(read_only=True)
    worker_version = serializers.SerializerMethodField(read_only=True)

    def __init__(self, *args, **kwargs):
        super(DatasetSerializer, self).__init__(*args, **kwargs)

        if "context" in kwargs:
            if "request" in kwargs["context"]:
                # only include the fields `experiments` and `organism_samples` when the param `?details=true`
                # is provided. This is used on the frontend to render the downloads page
                # thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
                if "details" not in kwargs["context"]["request"].query_params:
                    self.fields.pop("experiments")
                    self.fields.pop("organism_samples")
                    self.fields.pop("worker_version")

            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    def create(self, validated_data):
        # "start" isn't actually a field on the Dataset model, we just use it
        # on the frontend to control when the dataset gets dispatched
        if "start" in validated_data:
            validated_data.pop("start")

        return super(DatasetSerializer, self).create(validated_data)

    class Meta:
        model = Dataset
        fields = (
            "id",
            "data",
            "aggregate_by",
            "scale_by",
            "is_processing",
            "is_processed",
            "is_available",
            "has_email",
            "email_address",
            "email_ccdl_ok",
            "expires_on",
            "s3_bucket",
            "s3_key",
            "success",
            "failure_reason",
            "created_at",
            "last_modified",
            "start",
            "size_in_bytes",
            "sha1",
            "experiments",
            "organism_samples",
            "download_url",
            "quantile_normalize",
            "quant_sf_only",
            "svd_algorithm",
            "worker_version",
        )
        extra_kwargs = {
            "data": {"required": True,},
            "id": {"read_only": True,},
            "is_processing": {"read_only": True,},
            "is_processed": {"read_only": True,},
            "is_available": {"read_only": True,},
            "email_address": {"required": False, "write_only": True},
            "email_ccdl_ok": {"required": False, "write_only": True},
            "expires_on": {"read_only": True,},
            "s3_bucket": {"read_only": True,},
            "s3_key": {"read_only": True,},
            "success": {"read_only": True,},
            "failure_reason": {"read_only": True,},
            "created_at": {"read_only": True,},
            "last_modified": {"read_only": True,},
            "size_in_bytes": {"read_only": True,},
            "sha1": {"read_only": True,},
            "download_url": {"read_only": True,},
            "worker_version": {
                "read_only": True,
                "help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
            },
        }

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        try:
            validate_dataset(data)
        except Exception:
            raise
        return data

    def get_organism_samples(self, obj):
        """
        Groups the sample accession codes inside a dataset by their organisms, eg:
        { HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
        Useful to avoid sending sample information on the downloads page
        """
        samples = (
            obj.get_samples()
            .prefetch_related("organism")
            .values("organism__name", "accession_code")
            .order_by("organism__name", "accession_code")
        )

        result = defaultdict(list)
        for sample in samples:
            result[sample["organism__name"]].append(sample["accession_code"])

        return result

    def get_worker_version(self, obj):
        # Most recent processor job's worker_version, or None when the
        # dataset has not been processed yet.
        processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
            "worker_version", flat=True
        )
        if processor_jobs:
            return processor_jobs[0]
        else:
            return None
@method_decorator(
    name="retrieve",
    decorator=swagger_auto_schema(
        operation_description="View a single Dataset.",
        manual_parameters=[
            openapi.Parameter(
                name="details",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
            )
        ],
    ),
)
@method_decorator(
    name="update",
    decorator=swagger_auto_schema(
        operation_description="""
Modify an existing Dataset.

In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.

```py
import requests
import json

params = json.dumps({
    'data': data,
    'aggregate_by': 'EXPERIMENT',
    'start': True,
    'email_address': 'refinebio@gmail.com'
})
headers = {
    'Content-Type': 'application/json',
    'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
    ),
)
class DatasetView(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """ View and modify a single Dataset. """

    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = "id"

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the caller's activated APIToken (if the API-KEY header names a
        valid one) so the serializer can expose `download_url`.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, "token": token}
        except Exception:  # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
            return serializer_context

    def validate_token(self):
        # Make sure we have a valid activated token.
        # Accepts either a `token_id` body field or the API-KEY header.
        token_id = self.request.data.get("token_id", None)

        if not token_id:
            token_id = self.request.META.get("HTTP_API_KEY", None)

        try:
            APIToken.objects.get(id=token_id, is_activated=True)
        # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
        except Exception:
            raise serializers.ValidationError("You must provide an active API token ID")

    @staticmethod
    def convert_ALL_to_accessions(data):
        """Expand any `["ALL"]` experiment entry into its processed,
        QN-target-backed sample accession codes (mutates `data` in place)."""
        qn_organisms = Organism.get_objects_with_qn_targets()
        for key in data["data"].keys():
            accessions = data["data"][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(
                    experiment.samples.filter(
                        is_processed=True, organism__in=qn_organisms
                    ).values_list("accession_code", flat=True)
                )
                data["data"][key] = sample_codes

    def validate_email_address_is_nonempty(self):
        """Check to make sure the email exists. We call this when getting ready to dispatch a dataset"""
        supplied_email_address = self.request.data.get("email_address", None)
        if supplied_email_address is None:
            raise serializers.ValidationError("You must provide an email address.")

    def dispatch_job(self, serializer, obj):
        """Queue a SMASHER processor job for `obj` and mark it as processing.

        NOTE(review): the job's nomad_job_id is presumably filled in by
        send_job() at dispatch time, not here — confirm; jobs saved but never
        dispatched keep a NULL nomad_job_id.
        """
        processor_job = ProcessorJob()
        processor_job.pipeline_applied = "SMASHER"
        processor_job.ram_amount = 4096
        processor_job.save()

        pjda = ProcessorJobDatasetAssociation()
        pjda.processor_job = processor_job
        pjda.dataset = obj
        pjda.save()

        job_sent = False
        try:
            # Hidden method of non-dispatching for testing purposes.
            if not self.request.data.get("no_send_job", False):
                job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
            else:
                # We didn't actually send it, but we also didn't want to.
                job_sent = True
        except Exception:
            # job_sent is already false and the exception has
            # already been logged by send_job, so nothing to
            # do other than catch the exception.
            pass

        if not job_sent:
            raise APIException(
                "Unable to queue download job. Something has gone"
                " wrong and we have been notified about it."
            )

        serializer.validated_data["is_processing"] = True
        obj = serializer.save()

        # create a new dataset annotation with the information of this request
        annotation = DatasetAnnotation()
        annotation.dataset = obj
        annotation.data = {
            "start": True,
            "ip": get_client_ip(self.request),
            "user_agent": self.request.META.get("HTTP_USER_AGENT", None),
        }
        annotation.save()

    def create_or_update(self, serializer):
        """ If `start` is set, fire off the job. Otherwise just create/update the dataset"""
        data = serializer.validated_data

        DatasetView.convert_ALL_to_accessions(data)

        if data.get("start"):
            self.validate_token()
            self.validate_email_address_is_nonempty()

            obj = serializer.save()

            self.dispatch_job(serializer, obj)
        else:
            serializer.save()

    def perform_create(self, serializer):
        # Since we are creating a new dataset, there is no way it is already processed
        self.create_or_update(serializer)

    def perform_update(self, serializer):
        # Check to make sure we have not already processed the dataset
        old_object = self.get_object()
        if old_object.is_processed:
            raise serializers.ValidationError(
                "You may not update Datasets which have already been processed"
            )
        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        elif old_object.is_processing:
            self.validate_email_address_is_nonempty()

            serializer.validated_data["data"] = old_object.data
            serializer.validated_data["aggregate_by"] = old_object.aggregate_by
            serializer.save()
        else:
            self.create_or_update(serializer)
<|code_end|>
|
Make the response for non-downloadable samples more programmer-friendly
### Context
Right now the message is a string with an error message and accession codes inside of it.
### Problem or idea
We don't want users to have to parse the error message to get the accessions they need to remove from their dataset.
### Solution or next step
Can we make the serializer error return a dict containing both a human-readable error message and the list of offending accession codes, instead of a single string?
| api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains CreateDatasetView and DatasetView
##
from collections import defaultdict
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from rest_framework import filters, generics, serializers
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_common.models import (
APIToken,
Dataset,
DatasetAnnotation,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
)
def get_client_ip(request):
    """Return the originating client IP for *request*.

    Prefers the first entry of the X-Forwarded-For header (set by proxies)
    and falls back to REMOTE_ADDR, or "" when neither is present.
    """
    forwarded_chain = request.META.get("HTTP_X_FORWARDED_FOR")
    return (
        forwarded_chain.split(",")[0]
        if forwarded_chain
        else request.META.get("REMOTE_ADDR", "")
    )
def validate_dataset(data):
    """
    Dataset validation. Each experiment should always have at least one
    sample, all samples should be downloadable, and when starting the smasher
    there should be at least one experiment.

    Raises serializers.ValidationError on the first problem found. Errors
    about specific samples carry a dict payload so API clients can read the
    offending accession codes without parsing an error string.
    """
    if data.get("data") is None or type(data["data"]) != dict:
        raise serializers.ValidationError("`data` must be a dict of lists.")

    if data.get("start") and len(data["data"]) == 0:
        raise serializers.ValidationError("`data` must contain at least one experiment.")

    accessions = []
    for key, value in data["data"].items():
        if type(value) != list:
            raise serializers.ValidationError(
                "`data` must be a dict of lists. Problem with `" + str(key) + "`"
            )

        if len(value) < 1:
            raise serializers.ValidationError(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )

        try:
            has_duplicates = len(value) != len(set(value))
        except TypeError as e:
            # set() fails on unhashable entries (e.g. nested lists). The
            # previous `except Exception` also swallowed the duplicate-values
            # ValidationError raised inside its own try block and re-wrapped
            # it as "Received bad dataset data".
            raise serializers.ValidationError("Received bad dataset data: " + str(e))
        if has_duplicates:
            raise serializers.ValidationError("Duplicate values detected in " + str(value))

        # If they want "ALL", just make sure that the experiment has at least one downloadable sample
        if value == ["ALL"]:
            if data.get("quant_sf_only", False):
                try:
                    experiment = Experiment.public_objects.get(accession_code=key)
                except Exception:
                    raise serializers.ValidationError("Experiment " + key + " does not exist")

                samples = experiment.sample_set.filter(
                    # We only want samples with a quant.sf file associated with them
                    results__computedfile__filename="quant.sf",
                    results__computedfile__s3_key__isnull=False,
                    results__computedfile__s3_bucket__isnull=False,
                )
                if samples.count() == 0:
                    raise serializers.ValidationError(
                        "Experiment "
                        + key
                        + " does not have at least one sample with a quant.sf file"
                    )
            else:
                try:
                    experiment = Experiment.processed_public_objects.get(accession_code=key)
                except Exception:
                    raise serializers.ValidationError(
                        "Experiment " + key + " does not have at least one downloadable sample"
                    )
        # Otherwise, we will check that all the samples they requested are downloadable
        else:
            accessions.extend(value)

    if len(accessions) == 0:
        return

    if data.get("quant_sf_only", False):
        samples_without_quant_sf = Sample.public_objects.filter(
            accession_code__in=accessions
        ).exclude(
            # Exclude samples that have at least one uploaded quant.sf file associated with them
            results__computedfile__filename="quant.sf",
            results__computedfile__s3_key__isnull=False,
            results__computedfile__s3_bucket__isnull=False,
        )
        if samples_without_quant_sf.count() > 0:
            # Structured payload so clients can read the accession codes
            # directly instead of parsing them out of an error string.
            raise serializers.ValidationError(
                {
                    "message": "Sample(s) in dataset are missing quant.sf files",
                    "details": [s.accession_code for s in samples_without_quant_sf],
                }
            )
    else:
        unprocessed_samples = Sample.public_objects.filter(
            accession_code__in=accessions, is_processed=False
        )
        if unprocessed_samples.count() > 0:
            # Structured payload so clients can read the accession codes
            # directly instead of parsing them out of an error string.
            raise serializers.ValidationError(
                {
                    "message": "Non-downloadable sample(s) in dataset",
                    "details": [s.accession_code for s in unprocessed_samples],
                }
            )
class CreateDatasetSerializer(serializers.ModelSerializer):
    """Serializer used when POSTing a brand-new Dataset.

    Only the fields a client may set at creation time are writable here;
    processing state and results are managed through DatasetSerializer.
    """

    class Meta:
        model = Dataset
        fields = ("id", "data", "email_address", "email_ccdl_ok")

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        # The previous try/except that immediately re-raised was a no-op.
        validate_dataset(data)
        return data
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
    """ This serializer contains all of the information about an experiment needed for the download
    page
    """

    # Read-only alias: exposes Experiment.sample_metadata_fields as `sample_metadata`.
    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")

    class Meta:
        model = Experiment
        fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
    """Read/write serializer for Dataset.

    `experiments`, `organism_samples` and `worker_version` are only included
    when the request has `?details=true`; `download_url` is only included
    when a valid API token was placed in the serializer context by the view.
    """

    start = serializers.NullBooleanField(required=False)
    experiments = DatasetDetailsExperimentSerializer(
        source="get_experiments", many=True, read_only=True
    )
    organism_samples = serializers.SerializerMethodField(read_only=True)
    worker_version = serializers.SerializerMethodField(read_only=True)

    def __init__(self, *args, **kwargs):
        super(DatasetSerializer, self).__init__(*args, **kwargs)

        if "context" in kwargs:
            if "request" in kwargs["context"]:
                # only include the fields `experiments` and `organism_samples` when the param `?details=true`
                # is provided. This is used on the frontend to render the downloads page
                # thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
                if "details" not in kwargs["context"]["request"].query_params:
                    self.fields.pop("experiments")
                    self.fields.pop("organism_samples")
                    self.fields.pop("worker_version")

            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    class Meta:
        model = Dataset
        fields = (
            "id",
            "data",
            "aggregate_by",
            "scale_by",
            "is_processing",
            "is_processed",
            "is_available",
            "has_email",
            "expires_on",
            "s3_bucket",
            "s3_key",
            "success",
            "failure_reason",
            "created_at",
            "last_modified",
            "start",
            "size_in_bytes",
            "sha1",
            "experiments",
            "organism_samples",
            "download_url",
            "quantile_normalize",
            "quant_sf_only",
            "svd_algorithm",
            "worker_version",
        )
        extra_kwargs = {
            "data": {"required": True,},
            "id": {"read_only": True,},
            "is_processing": {"read_only": True,},
            "is_processed": {"read_only": True,},
            "is_available": {"read_only": True,},
            "expires_on": {"read_only": True,},
            "s3_bucket": {"read_only": True,},
            "s3_key": {"read_only": True,},
            "success": {"read_only": True,},
            "failure_reason": {"read_only": True,},
            "created_at": {"read_only": True,},
            "last_modified": {"read_only": True,},
            "size_in_bytes": {"read_only": True,},
            "sha1": {"read_only": True,},
            "download_url": {"read_only": True,},
            "worker_version": {
                "read_only": True,
                "help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
            },
        }

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        try:
            validate_dataset(data)
        except Exception:
            raise
        return data

    def get_organism_samples(self, obj):
        """
        Groups the sample accession codes inside a dataset by their organisms, eg:
        { HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
        Useful to avoid sending sample information on the downloads page
        """
        samples = (
            obj.get_samples()
            .prefetch_related("organism")
            .values("organism__name", "accession_code")
            .order_by("organism__name", "accession_code")
        )

        result = defaultdict(list)
        for sample in samples:
            result[sample["organism__name"]].append(sample["accession_code"])

        return result

    def get_worker_version(self, obj):
        # Most recent processor job's worker_version, or None when the
        # dataset has not been processed yet.
        processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
            "worker_version", flat=True
        )
        if processor_jobs:
            return processor_jobs[0]
        else:
            return None
class CreateDatasetView(generics.CreateAPIView):
""" Creates and returns new Datasets. """
queryset = Dataset.objects.all()
serializer_class = CreateDatasetSerializer
@method_decorator(
name="get",
decorator=swagger_auto_schema(
operation_description="View a single Dataset.",
manual_parameters=[
openapi.Parameter(
name="details",
in_=openapi.IN_QUERY,
type=openapi.TYPE_BOOLEAN,
description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
)
],
),
)
@method_decorator(
name="patch", decorator=swagger_auto_schema(auto_schema=None)
) # partial updates not supported
@method_decorator(
name="put",
decorator=swagger_auto_schema(
operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
'data': data,
'aggregate_by': 'EXPERIMENT',
'start': True,
'email_address': 'refinebio@gmail.com'
})
headers = {
'Content-Type': 'application/json',
'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
),
)
class DatasetView(generics.RetrieveUpdateAPIView):
""" View and modify a single Dataset. """
queryset = Dataset.objects.all()
serializer_class = DatasetSerializer
lookup_field = "id"
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
serializer_context = super(DatasetView, self).get_serializer_context()
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
token = APIToken.objects.get(id=token_id, is_activated=True)
return {**serializer_context, "token": token}
except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
return serializer_context
def perform_update(self, serializer):
""" If `start` is set, fire off the job. Disables dataset data updates after that.
"""
old_object = self.get_object()
old_data = old_object.data
old_aggregate = old_object.aggregate_by
already_processing = old_object.is_processing
new_data = serializer.validated_data
qn_organisms = Organism.get_objects_with_qn_targets()
# We convert 'ALL' into the actual accession codes given
for key in new_data["data"].keys():
accessions = new_data["data"][key]
if accessions == ["ALL"]:
experiment = get_object_or_404(Experiment, accession_code=key)
sample_codes = list(
experiment.samples.filter(
is_processed=True, organism__in=qn_organisms
).values_list("accession_code", flat=True)
)
new_data["data"][key] = sample_codes
if old_object.is_processed:
raise serializers.ValidationError(
"You may not update Datasets which have already been processed"
)
if new_data.get("start"):
# Make sure we have a valid activated token.
token_id = self.request.data.get("token_id", None)
if not token_id:
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
APIToken.objects.get(id=token_id, is_activated=True)
# General APIToken.DoesNotExist or django.core.exceptions.ValidationError
except Exception:
raise serializers.ValidationError("You must provide an active API token ID")
supplied_email_address = self.request.data.get("email_address", None)
if supplied_email_address is None:
raise serializers.ValidationError("You must provide an email address.")
email_ccdl_ok = self.request.data.get("email_ccdl_ok", False)
if not already_processing:
# Create and dispatch the new job.
processor_job = ProcessorJob()
processor_job.pipeline_applied = "SMASHER"
processor_job.ram_amount = 4096
processor_job.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = processor_job
pjda.dataset = old_object
pjda.save()
job_sent = False
obj = serializer.save()
if obj.email_address != supplied_email_address:
obj.email_address = supplied_email_address
obj.save()
if email_ccdl_ok:
obj.email_ccdl_ok = email_ccdl_ok
obj.save()
try:
# Hidden method of non-dispatching for testing purposes.
if not self.request.data.get("no_send_job", False):
job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
else:
# We didn't actually send it, but we also didn't want to.
job_sent = True
except Exception:
# job_sent is already false and the exception has
# already been logged by send_job, so nothing to
# do other than catch the exception.
pass
if not job_sent:
raise APIException(
"Unable to queue download job. Something has gone"
" wrong and we have been notified about it."
)
serializer.validated_data["is_processing"] = True
obj = serializer.save()
# create a new dataset annotation with the information of this request
annotation = DatasetAnnotation()
annotation.dataset = old_object
annotation.data = {
"start": True,
"ip": get_client_ip(self.request),
"user_agent": self.request.META.get("HTTP_USER_AGENT", None),
}
annotation.save()
return obj
# Don't allow critical data updates to jobs that have already been submitted,
# but do allow email address updating.
if already_processing:
serializer.validated_data["data"] = old_data
serializer.validated_data["aggregate_by"] = old_aggregate
serializer.save()
<|code_end|>
| api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains CreateDatasetView and DatasetView
##
from collections import defaultdict
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from rest_framework import filters, generics, serializers
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_common.models import (
APIToken,
Dataset,
DatasetAnnotation,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
)
def get_client_ip(request):
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forwarded_for:
ip = x_forwarded_for.split(",")[0]
else:
ip = request.META.get("REMOTE_ADDR", "")
return ip
def experiment_has_downloadable_samples(experiment, quant_sf_only=False):
if quant_sf_only:
try:
experiment = Experiment.public_objects.get(accession_code=experiment)
except Exception as e:
return False
samples = experiment.sample_set.filter(
# We only want samples with a quant.sf file associated with them
results__computedfile__filename="quant.sf",
results__computedfile__s3_key__isnull=False,
results__computedfile__s3_bucket__isnull=False,
)
if samples.count() == 0:
return False
else:
try:
experiment = Experiment.processed_public_objects.get(accession_code=experiment)
except Exception as e:
return False
return True
def validate_dataset(data):
"""
Dataset validation. Each experiment should always have at least one
sample, all samples should be downloadable, and when starting the smasher
there should be at least one experiment.
"""
if data.get("data") is None or type(data["data"]) != dict:
raise serializers.ValidationError("`data` must be a dict of lists.")
if data.get("start") and len(data["data"]) == 0:
raise serializers.ValidationError("`data` must contain at least one experiment..")
accessions = []
non_downloadable_experiments = []
for key, value in data["data"].items():
if type(value) != list:
raise serializers.ValidationError(
"`data` must be a dict of lists. Problem with `" + str(key) + "`"
)
if len(value) < 1:
raise serializers.ValidationError(
"`data` must be a dict of lists, each with one or more elements. Problem with `"
+ str(key)
+ "`"
)
try:
if len(value) != len(set(value)):
raise serializers.ValidationError("Duplicate values detected in " + str(value))
except Exception as e:
raise serializers.ValidationError("Received bad dataset data: " + str(e))
# If they want "ALL", just make sure that the experiment has at least one downloadable sample
if value == ["ALL"]:
if not experiment_has_downloadable_samples(
key, quant_sf_only=data.get("quant_sf_only", False)
):
non_downloadable_experiments.append(key)
# Otherwise, we will check that all the samples they requested are downloadable
else:
accessions.extend(value)
if len(non_downloadable_experiments) != 0:
raise serializers.ValidationError(
{
"message": "Experiment(s) in dataset have zero downloadable samples. See `non_downloadable_experiments` for a full list",
"non_downloadable_experiments": non_downloadable_experiments,
}
)
if len(accessions) == 0:
return
if data.get("quant_sf_only", False):
samples_without_quant_sf = Sample.public_objects.filter(
accession_code__in=accessions
).exclude(
# Exclude samples that have at least one uploaded quant.sf file associated with them
results__computedfile__filename="quant.sf",
results__computedfile__s3_key__isnull=False,
results__computedfile__s3_bucket__isnull=False,
)
if samples_without_quant_sf.count() > 0:
raise serializers.ValidationError(
{
"message": "Sample(s) in dataset are missing quant.sf files. See `non_downloadable_samples` for a full list",
"non_downloadable_samples": [
s.accession_code for s in samples_without_quant_sf
],
},
)
else:
unprocessed_samples = Sample.public_objects.filter(
accession_code__in=accessions, is_processed=False
)
if unprocessed_samples.count() > 0:
raise serializers.ValidationError(
{
"message": "Non-downloadable sample(s) in dataset. See `non_downloadable_samples` for a full list",
"non_downloadable_samples": [s.accession_code for s in unprocessed_samples],
}
)
class CreateDatasetSerializer(serializers.ModelSerializer):
class Meta:
model = Dataset
fields = ("id", "data", "email_address", "email_ccdl_ok")
def validate(self, data):
"""
Ensure this is something we want in our dataset.
"""
try:
validate_dataset(data)
except Exception:
raise
return data
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
""" This serializer contains all of the information about an experiment needed for the download
page
"""
sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")
class Meta:
model = Experiment
fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
start = serializers.NullBooleanField(required=False)
experiments = DatasetDetailsExperimentSerializer(
source="get_experiments", many=True, read_only=True
)
organism_samples = serializers.SerializerMethodField(read_only=True)
worker_version = serializers.SerializerMethodField(read_only=True)
def __init__(self, *args, **kwargs):
super(DatasetSerializer, self).__init__(*args, **kwargs)
if "context" in kwargs:
if "request" in kwargs["context"]:
# only include the fields `experiments` and `organism_samples` when the param `?details=true`
# is provided. This is used on the frontend to render the downloads page
# thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
if "details" not in kwargs["context"]["request"].query_params:
self.fields.pop("experiments")
self.fields.pop("organism_samples")
self.fields.pop("worker_version")
# only include the field `download_url` if a valid token is specified
# the token lookup happens in the view.
if "token" not in kwargs["context"]:
self.fields.pop("download_url")
class Meta:
model = Dataset
fields = (
"id",
"data",
"aggregate_by",
"scale_by",
"is_processing",
"is_processed",
"is_available",
"has_email",
"expires_on",
"s3_bucket",
"s3_key",
"success",
"failure_reason",
"created_at",
"last_modified",
"start",
"size_in_bytes",
"sha1",
"experiments",
"organism_samples",
"download_url",
"quantile_normalize",
"quant_sf_only",
"svd_algorithm",
"worker_version",
)
extra_kwargs = {
"data": {"required": True,},
"id": {"read_only": True,},
"is_processing": {"read_only": True,},
"is_processed": {"read_only": True,},
"is_available": {"read_only": True,},
"expires_on": {"read_only": True,},
"s3_bucket": {"read_only": True,},
"s3_key": {"read_only": True,},
"success": {"read_only": True,},
"failure_reason": {"read_only": True,},
"created_at": {"read_only": True,},
"last_modified": {"read_only": True,},
"size_in_bytes": {"read_only": True,},
"sha1": {"read_only": True,},
"download_url": {"read_only": True,},
"worker_version": {
"read_only": True,
"help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
},
}
def validate(self, data):
"""
Ensure this is something we want in our dataset.
"""
try:
validate_dataset(data)
except Exception:
raise
return data
def get_organism_samples(self, obj):
"""
Groups the sample accession codes inside a dataset by their organisms, eg:
{ HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
Useful to avoid sending sample information on the downloads page
"""
samples = (
obj.get_samples()
.prefetch_related("organism")
.values("organism__name", "accession_code")
.order_by("organism__name", "accession_code")
)
result = defaultdict(list)
for sample in samples:
result[sample["organism__name"]].append(sample["accession_code"])
return result
def get_worker_version(self, obj):
processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
"worker_version", flat=True
)
if processor_jobs:
return processor_jobs[0]
else:
return None
class CreateDatasetView(generics.CreateAPIView):
""" Creates and returns new Datasets. """
queryset = Dataset.objects.all()
serializer_class = CreateDatasetSerializer
@method_decorator(
name="get",
decorator=swagger_auto_schema(
operation_description="View a single Dataset.",
manual_parameters=[
openapi.Parameter(
name="details",
in_=openapi.IN_QUERY,
type=openapi.TYPE_BOOLEAN,
description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
)
],
),
)
@method_decorator(
name="patch", decorator=swagger_auto_schema(auto_schema=None)
) # partial updates not supported
@method_decorator(
name="put",
decorator=swagger_auto_schema(
operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
'data': data,
'aggregate_by': 'EXPERIMENT',
'start': True,
'email_address': 'refinebio@gmail.com'
})
headers = {
'Content-Type': 'application/json',
'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
),
)
class DatasetView(generics.RetrieveUpdateAPIView):
""" View and modify a single Dataset. """
queryset = Dataset.objects.all()
serializer_class = DatasetSerializer
lookup_field = "id"
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
serializer_context = super(DatasetView, self).get_serializer_context()
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
token = APIToken.objects.get(id=token_id, is_activated=True)
return {**serializer_context, "token": token}
except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
return serializer_context
def perform_update(self, serializer):
""" If `start` is set, fire off the job. Disables dataset data updates after that.
"""
old_object = self.get_object()
old_data = old_object.data
old_aggregate = old_object.aggregate_by
already_processing = old_object.is_processing
new_data = serializer.validated_data
qn_organisms = Organism.get_objects_with_qn_targets()
# We convert 'ALL' into the actual accession codes given
for key in new_data["data"].keys():
accessions = new_data["data"][key]
if accessions == ["ALL"]:
experiment = get_object_or_404(Experiment, accession_code=key)
sample_codes = list(
experiment.samples.filter(
is_processed=True, organism__in=qn_organisms
).values_list("accession_code", flat=True)
)
new_data["data"][key] = sample_codes
if old_object.is_processed:
raise serializers.ValidationError(
"You may not update Datasets which have already been processed"
)
if new_data.get("start"):
# Make sure we have a valid activated token.
token_id = self.request.data.get("token_id", None)
if not token_id:
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
APIToken.objects.get(id=token_id, is_activated=True)
# General APIToken.DoesNotExist or django.core.exceptions.ValidationError
except Exception:
raise serializers.ValidationError("You must provide an active API token ID")
supplied_email_address = self.request.data.get("email_address", None)
if supplied_email_address is None:
raise serializers.ValidationError("You must provide an email address.")
email_ccdl_ok = self.request.data.get("email_ccdl_ok", False)
if not already_processing:
# Create and dispatch the new job.
processor_job = ProcessorJob()
processor_job.pipeline_applied = "SMASHER"
processor_job.ram_amount = 4096
processor_job.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = processor_job
pjda.dataset = old_object
pjda.save()
job_sent = False
obj = serializer.save()
if obj.email_address != supplied_email_address:
obj.email_address = supplied_email_address
obj.save()
if email_ccdl_ok:
obj.email_ccdl_ok = email_ccdl_ok
obj.save()
try:
# Hidden method of non-dispatching for testing purposes.
if not self.request.data.get("no_send_job", False):
job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
else:
# We didn't actually send it, but we also didn't want to.
job_sent = True
except Exception:
# job_sent is already false and the exception has
# already been logged by send_job, so nothing to
# do other than catch the exception.
pass
if not job_sent:
raise APIException(
"Unable to queue download job. Something has gone"
" wrong and we have been notified about it."
)
serializer.validated_data["is_processing"] = True
obj = serializer.save()
# create a new dataset annotation with the information of this request
annotation = DatasetAnnotation()
annotation.dataset = old_object
annotation.data = {
"start": True,
"ip": get_client_ip(self.request),
"user_agent": self.request.META.get("HTTP_USER_AGENT", None),
}
annotation.save()
return obj
# Don't allow critical data updates to jobs that have already been submitted,
# but do allow email address updating.
if already_processing:
serializer.validated_data["data"] = old_data
serializer.validated_data["aggregate_by"] = old_aggregate
serializer.save()
<|code_end|>
|
Make the token endpoint more programmer-friendly
### Context
I was writing up an example of how to create a dataset using the API
### Problem or idea
Just like it should only take one request to create a dataset if you have everything ready, I don't see a good reason why we need two requests to create an API token.
### Solution or next step
Refactor the token routes to be like the refactored dataset viewset.
| api/data_refinery_api/urls.py
<|code_start|>from re import match
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.http import JsonResponse
from django.urls import include, path
from django.views.generic import RedirectView
from rest_framework import permissions
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from data_refinery_api.views import (
AboutStats,
APITokenView,
CompendiumResultDetailView,
CompendiumResultListView,
ComputationalResultDetailView,
ComputationalResultListView,
ComputedFileDetailView,
ComputedFileListView,
CreateAPITokenView,
DatasetView,
DownloaderJobDetailView,
DownloaderJobListView,
ExperimentDetailView,
ExperimentDocumentView,
ExperimentListView,
FailedDownloaderJobStats,
FailedProcessorJobStats,
InstitutionListView,
OrganismDetailView,
OrganismListView,
OriginalFileDetailView,
OriginalFileListView,
PlatformListView,
ProcessorDetailView,
ProcessorJobDetailView,
ProcessorJobListView,
ProcessorListView,
QNTargetsAvailable,
QNTargetsDetailView,
SampleDetailView,
SampleListView,
Stats,
SurveyJobDetailView,
SurveyJobListView,
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
# error handlers
def handle404error(request, exception):
message = "The requested resource was not found on this server."
url = "https://api.refine.bio/"
# check to see if the 404ed request contained a version
if not match(r"^/v[1-9]/.*", request.path):
message = "refine.bio API resources are only available through versioned requests."
return JsonResponse({"message": message, "docs": url, "status_code": 404,}, status=404)
def handle500error(request):
return JsonResponse(
{"message": "A server error occured. This has been reported.", "status_code": 500,},
status=500,
)
# This provides _public_ access to the /admin interface!
# Enabling this by setting DEBUG to true this will allow unauthenticated access to the admin interface.
# Very useful for debugging (since we have no User accounts), but very dangerous for prod!
class AccessUser:
has_module_perms = has_perm = __getattr__ = lambda s, *a, **kw: True
if settings.DEBUG:
admin.site.has_permission = lambda r: setattr(r, "user", AccessUser()) or True
schema_view = get_schema_view(
openapi.Info(
title="Refine.bio API",
default_version="v1",
description="""
refine.bio is a multi-organism collection of genome-wide transcriptome or gene expression data that has been obtained from publicly available repositories and uniformly processed and normalized. refine.bio allows biologists, clinicians, and machine learning researchers to search for experiments from different source repositories all in one place and build custom data sets for their questions of interest.
The swagger-ui view can be found [here](http://api.refine.bio/swagger/).
The ReDoc view can be found [here](http://api.refine.bio/).
Additional documentation can be found at [docs.refine.bio](http://docs.refine.bio/en/latest/).
### Questions/Feedback?
If you have a question or comment, please [file an issue on GitHub](https://github.com/AlexsLemonade/refinebio/issues) or send us an email at [ccdl@alexslemonade.org](mailto:ccdl@alexslemonade.org).
""",
terms_of_service="https://www.refine.bio/terms",
contact=openapi.Contact(email="ccdl@alexslemonade.org"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
url(
r"^(?P<version>v1)/",
include(
[
# Primary search and filter interface
url(r"^search/$", ExperimentDocumentView.as_view({"get": "list"}), name="search"),
url(r"^experiments/$", ExperimentListView.as_view(), name="experiments"),
url(
r"^experiments/(?P<accession_code>.+)/$",
ExperimentDetailView.as_view(),
name="experiments_detail",
),
url(r"^samples/$", SampleListView.as_view(), name="samples"),
url(
r"^samples/(?P<accession_code>.+)/$",
SampleDetailView.as_view(),
name="samples_detail",
),
url(r"^organisms/$", OrganismListView.as_view(), name="organisms"),
url(
r"^organisms/(?P<name>.+)/$",
OrganismDetailView.as_view(),
name="organisms_detail",
),
url(r"^platforms/$", PlatformListView.as_view(), name="platforms"),
url(r"^institutions/$", InstitutionListView.as_view(), name="institutions"),
url(r"^processors/$", ProcessorListView.as_view(), name="processors"),
url(
r"^processors/(?P<id>[0-9a-f-]+)/$",
ProcessorDetailView.as_view(),
name="processors_detail",
),
# Deliverables
url(r"^dataset/$", DatasetView.as_view({"post": "create"}), name="create_dataset"),
url(
r"^dataset/(?P<id>[0-9a-f-]+)/$",
DatasetView.as_view({"get": "retrieve", "put": "update"}),
name="dataset",
),
url(r"^token/$", CreateAPITokenView.as_view(), name="token"),
url(r"^token/(?P<id>[0-9a-f-]+)/$", APITokenView.as_view(), name="token_id"),
# Jobs
url(r"^jobs/survey/$", SurveyJobListView.as_view(), name="survey_jobs"),
url(
r"^jobs/survey/(?P<id>[0-9a-f-]+)/$",
SurveyJobDetailView.as_view(),
name="survey_jobs_detail",
),
url(r"^jobs/downloader/$", DownloaderJobListView.as_view(), name="downloader_jobs"),
url(
r"^jobs/downloader/(?P<id>[0-9a-f-]+)/$",
DownloaderJobDetailView.as_view(),
name="downloader_jobs_detail",
),
url(r"^jobs/processor/$", ProcessorJobListView.as_view(), name="processor_jobs"),
url(
r"^jobs/processor/(?P<id>[0-9a-f-]+)/$",
ProcessorJobDetailView.as_view(),
name="processor_jobs_detail",
),
# Dashboard Driver
url(r"^stats/$", Stats.as_view(), name="stats"),
url(
r"^stats/failures/downloader$",
FailedDownloaderJobStats.as_view(),
name="stats_failed_downloader",
),
url(
r"^stats/failures/processor$",
FailedProcessorJobStats.as_view(),
name="stats_failed_processor",
),
url(r"^stats-about/$", AboutStats.as_view(), name="stats_about"),
# Transcriptome Indices
path(
"transcriptome_indices/",
include(
[
path(
"",
TranscriptomeIndexListView.as_view(),
name="transcriptome_indices",
),
path(
"<int:id>",
TranscriptomeIndexDetailView.as_view(),
name="transcriptome_indices_read",
),
]
),
),
# QN Targets
url(r"^qn_targets/$", QNTargetsAvailable.as_view(), name="qn_targets_available"),
url(
r"^qn_targets/(?P<organism_name>.+)$",
QNTargetsDetailView.as_view(),
name="qn_targets",
),
# Computed Files
url(r"^computed_files/$", ComputedFileListView.as_view(), name="computed_files"),
url(
r"^computed_files/(?P<id>[0-9a-f-]+)/$",
ComputedFileDetailView.as_view(),
name="computed_files_detail",
),
url(r"^original_files/$", OriginalFileListView.as_view(), name="original_files"),
url(
r"^original_files/(?P<id>[0-9a-f-]+)/$",
OriginalFileDetailView.as_view(),
name="original_files_detail",
),
url(
r"^computational_results/$",
ComputationalResultListView.as_view(),
name="results",
),
url(
r"^computational_results/(?P<id>[0-9a-f-]+)/$",
ComputationalResultDetailView.as_view(),
name="results_detail",
),
# Compendia
url(r"^compendia/$", CompendiumResultListView.as_view(), name="compendium_results"),
url(
r"^compendia/(?P<id>[0-9]+)/$",
CompendiumResultDetailView.as_view(),
name="compendium_result",
),
# v1 api docs
url(
r"^swagger/$",
schema_view.with_ui("swagger", cache_timeout=0),
name="schema_swagger_ui",
),
url(r"^$", schema_view.with_ui("redoc", cache_timeout=0), name="schema_redoc"),
]
),
),
# Admin
url(r"^admin/", admin.site.urls),
# Redirect root urls to latest version api docs
url(r"^swagger/$", RedirectView.as_view(url="/v1/swagger")),
url(r"^$", RedirectView.as_view(url="/v1")),
]
# handle errors
handler404 = handle404error
handler500 = handle500error
<|code_end|>
api/data_refinery_api/views/__init__.py
<|code_start|>from data_refinery_api.views.api_token import APITokenView, CreateAPITokenView
from data_refinery_api.views.compendium_result import (
CompendiumResultDetailView,
CompendiumResultListView,
)
from data_refinery_api.views.computational_result import (
ComputationalResultDetailView,
ComputationalResultListView,
)
from data_refinery_api.views.computed_file import ComputedFileDetailView, ComputedFileListView
from data_refinery_api.views.dataset import DatasetView
from data_refinery_api.views.experiment import ExperimentDetailView, ExperimentListView
from data_refinery_api.views.experiment_document import ExperimentDocumentView
from data_refinery_api.views.institution import InstitutionListView
from data_refinery_api.views.jobs import (
DownloaderJobDetailView,
DownloaderJobListView,
ProcessorJobDetailView,
ProcessorJobListView,
SurveyJobDetailView,
SurveyJobListView,
)
from data_refinery_api.views.organism import OrganismDetailView, OrganismListView
from data_refinery_api.views.original_file import OriginalFileDetailView, OriginalFileListView
from data_refinery_api.views.platform import PlatformListView
from data_refinery_api.views.processor import ProcessorDetailView, ProcessorListView
from data_refinery_api.views.qn_targets import QNTargetsAvailable, QNTargetsDetailView
from data_refinery_api.views.sample import SampleDetailView, SampleListView
from data_refinery_api.views.stats import (
AboutStats,
FailedDownloaderJobStats,
FailedProcessorJobStats,
Stats,
)
from data_refinery_api.views.transcriptome_index import (
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
<|code_end|>
api/data_refinery_api/views/api_token.py
<|code_start|>##
# Contains CreateAPITokenView, APITokenView, and needed serializer
##
from django.utils.decorators import method_decorator
from rest_framework import generics, serializers
from drf_yasg.utils import swagger_auto_schema
from data_refinery_common.models import APIToken
class APITokenSerializer(serializers.ModelSerializer):
class Meta:
model = APIToken
fields = ("id", "is_activated", "terms_and_conditions")
extra_kwargs = {
"id": {"read_only": True},
"is_activated": {"read_only": False},
"terms_and_conditions": {"read_only": True},
}
class CreateAPITokenView(generics.CreateAPIView):
"""
token_create
This endpoint can be used to create and activate tokens. These tokens can be used
in requests that provide urls to download computed files. Setting `is_activated` to
true indicates agreement with refine.bio's [Terms of Use](https://www.refine.bio/terms)
and [Privacy Policy](https://www.refine.bio/privacy).
```py
import requests
import json
response = requests.post('https://api.refine.bio/v1/token/')
token_id = response.json()['id']
response = requests.put('https://api.refine.bio/v1/token/' + token_id + '/', json.dumps({'is_activated': True}), headers={'Content-Type': 'application/json'})
```
The token id needs to be provided in the HTTP request in the API-KEY header.
References
- [https://github.com/AlexsLemonade/refinebio/issues/731]()
- [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
"""
model = APIToken
serializer_class = APITokenSerializer
@method_decorator(name="patch", decorator=swagger_auto_schema(auto_schema=None))
class APITokenView(generics.RetrieveUpdateAPIView):
"""
Read and modify Api Tokens.
get:
Return details about a specific token.
put:
This can be used to activate a specific token by sending `is_activated: true`.
Setting `is_activated` to true indicates agreement with refine.bio's
[Terms of Use](https://www.refine.bio/terms) and
[Privacy Policy](https://www.refine.bio/privacy).
"""
model = APIToken
lookup_field = "id"
queryset = APIToken.objects.all()
serializer_class = APITokenSerializer
<|code_end|>
| api/data_refinery_api/urls.py
<|code_start|>from re import match
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.http import JsonResponse
from django.urls import include, path
from django.views.generic import RedirectView
from rest_framework import permissions
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from data_refinery_api.views import (
AboutStats,
APITokenView,
CompendiumResultDetailView,
CompendiumResultListView,
ComputationalResultDetailView,
ComputationalResultListView,
ComputedFileDetailView,
ComputedFileListView,
DatasetView,
DownloaderJobDetailView,
DownloaderJobListView,
ExperimentDetailView,
ExperimentDocumentView,
ExperimentListView,
FailedDownloaderJobStats,
FailedProcessorJobStats,
InstitutionListView,
OrganismDetailView,
OrganismListView,
OriginalFileDetailView,
OriginalFileListView,
PlatformListView,
ProcessorDetailView,
ProcessorJobDetailView,
ProcessorJobListView,
ProcessorListView,
QNTargetsAvailable,
QNTargetsDetailView,
SampleDetailView,
SampleListView,
Stats,
SurveyJobDetailView,
SurveyJobListView,
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
# error handlers
def handle404error(request, exception):
    """Return a JSON 404 payload, steering unversioned requests toward /v1/."""
    docs_url = "https://api.refine.bio/"
    # Requests that carry a version prefix get the plain not-found message;
    # everything else is told the API is versioned.
    is_versioned = match(r"^/v[1-9]/.*", request.path) is not None
    if is_versioned:
        message = "The requested resource was not found on this server."
    else:
        message = "refine.bio API resources are only available through versioned requests."
    payload = {"message": message, "docs": docs_url, "status_code": 404}
    return JsonResponse(payload, status=404)
def handle500error(request):
    """Return a generic JSON 500 payload; details are reported server-side, not exposed."""
    return JsonResponse(
        # Typo fix in the user-facing message: "occured" -> "occurred".
        {"message": "A server error occurred. This has been reported.", "status_code": 500,},
        status=500,
    )
# This provides _public_ access to the /admin interface!
# Enabling this by setting DEBUG to true this will allow unauthenticated access to the admin interface.
# Very useful for debugging (since we have no User accounts), but very dangerous for prod!
class AccessUser:
    """Stub user that answers True to every permission query.

    Installed as the admin-site user only when DEBUG is enabled, so the
    /admin interface is reachable without authentication during development.
    """

    def has_module_perms(self, *args, **kwargs):
        # Grant access to every admin app listing.
        return True

    def has_perm(self, *args, **kwargs):
        # Grant every object/model permission.
        return True

    def __getattr__(self, name):
        # Any other attribute the admin probes (is_staff, is_active, ...) is True.
        return True
# DANGER: with DEBUG on, every request to /admin is treated as a fully
# privileged AccessUser — unauthenticated admin access, dev only.
if settings.DEBUG:
    admin.site.has_permission = lambda r: setattr(r, "user", AccessUser()) or True

# drf-yasg schema view backing both the swagger-ui and ReDoc documentation pages.
schema_view = get_schema_view(
    openapi.Info(
        title="Refine.bio API",
        default_version="v1",
        description="""
refine.bio is a multi-organism collection of genome-wide transcriptome or gene expression data that has been obtained from publicly available repositories and uniformly processed and normalized. refine.bio allows biologists, clinicians, and machine learning researchers to search for experiments from different source repositories all in one place and build custom data sets for their questions of interest.

The swagger-ui view can be found [here](http://api.refine.bio/swagger/).
The ReDoc view can be found [here](http://api.refine.bio/).
Additional documentation can be found at [docs.refine.bio](http://docs.refine.bio/en/latest/).

### Questions/Feedback?

If you have a question or comment, please [file an issue on GitHub](https://github.com/AlexsLemonade/refinebio/issues) or send us an email at [ccdl@alexslemonade.org](mailto:ccdl@alexslemonade.org).
""",
        terms_of_service="https://www.refine.bio/terms",
        contact=openapi.Contact(email="ccdl@alexslemonade.org"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
# All public endpoints live under the versioned ^v1/ prefix; unprefixed
# requests fall through to handle404error with a versioning hint.
urlpatterns = [
    url(
        r"^(?P<version>v1)/",
        include(
            [
                # Primary search and filter interface
                url(r"^search/$", ExperimentDocumentView.as_view({"get": "list"}), name="search"),
                url(r"^experiments/$", ExperimentListView.as_view(), name="experiments"),
                url(
                    r"^experiments/(?P<accession_code>.+)/$",
                    ExperimentDetailView.as_view(),
                    name="experiments_detail",
                ),
                url(r"^samples/$", SampleListView.as_view(), name="samples"),
                url(
                    r"^samples/(?P<accession_code>.+)/$",
                    SampleDetailView.as_view(),
                    name="samples_detail",
                ),
                url(r"^organisms/$", OrganismListView.as_view(), name="organisms"),
                url(
                    r"^organisms/(?P<name>.+)/$",
                    OrganismDetailView.as_view(),
                    name="organisms_detail",
                ),
                url(r"^platforms/$", PlatformListView.as_view(), name="platforms"),
                url(r"^institutions/$", InstitutionListView.as_view(), name="institutions"),
                url(r"^processors/$", ProcessorListView.as_view(), name="processors"),
                url(
                    r"^processors/(?P<id>[0-9a-f-]+)/$",
                    ProcessorDetailView.as_view(),
                    name="processors_detail",
                ),
                # Deliverables
                url(r"^dataset/$", DatasetView.as_view({"post": "create"}), name="create_dataset"),
                url(
                    r"^dataset/(?P<id>[0-9a-f-]+)/$",
                    DatasetView.as_view({"get": "retrieve", "put": "update"}),
                    name="dataset",
                ),
                url(r"^token/$", APITokenView.as_view({"post": "create"}), name="token"),
                url(
                    r"^token/(?P<id>[0-9a-f-]+)/$",
                    APITokenView.as_view({"get": "retrieve", "put": "update"}),
                    name="token_id",
                ),
                # Jobs
                url(r"^jobs/survey/$", SurveyJobListView.as_view(), name="survey_jobs"),
                url(
                    r"^jobs/survey/(?P<id>[0-9a-f-]+)/$",
                    SurveyJobDetailView.as_view(),
                    name="survey_jobs_detail",
                ),
                url(r"^jobs/downloader/$", DownloaderJobListView.as_view(), name="downloader_jobs"),
                url(
                    r"^jobs/downloader/(?P<id>[0-9a-f-]+)/$",
                    DownloaderJobDetailView.as_view(),
                    name="downloader_jobs_detail",
                ),
                url(r"^jobs/processor/$", ProcessorJobListView.as_view(), name="processor_jobs"),
                url(
                    r"^jobs/processor/(?P<id>[0-9a-f-]+)/$",
                    ProcessorJobDetailView.as_view(),
                    name="processor_jobs_detail",
                ),
                # Dashboard Driver
                url(r"^stats/$", Stats.as_view(), name="stats"),
                url(
                    r"^stats/failures/downloader$",
                    FailedDownloaderJobStats.as_view(),
                    name="stats_failed_downloader",
                ),
                url(
                    r"^stats/failures/processor$",
                    FailedProcessorJobStats.as_view(),
                    name="stats_failed_processor",
                ),
                url(r"^stats-about/$", AboutStats.as_view(), name="stats_about"),
                # Transcriptome Indices
                path(
                    "transcriptome_indices/",
                    include(
                        [
                            path(
                                "",
                                TranscriptomeIndexListView.as_view(),
                                name="transcriptome_indices",
                            ),
                            path(
                                "<int:id>",
                                TranscriptomeIndexDetailView.as_view(),
                                name="transcriptome_indices_read",
                            ),
                        ]
                    ),
                ),
                # QN Targets
                url(r"^qn_targets/$", QNTargetsAvailable.as_view(), name="qn_targets_available"),
                url(
                    r"^qn_targets/(?P<organism_name>.+)$",
                    QNTargetsDetailView.as_view(),
                    name="qn_targets",
                ),
                # Computed Files
                url(r"^computed_files/$", ComputedFileListView.as_view(), name="computed_files"),
                url(
                    r"^computed_files/(?P<id>[0-9a-f-]+)/$",
                    ComputedFileDetailView.as_view(),
                    name="computed_files_detail",
                ),
                url(r"^original_files/$", OriginalFileListView.as_view(), name="original_files"),
                url(
                    r"^original_files/(?P<id>[0-9a-f-]+)/$",
                    OriginalFileDetailView.as_view(),
                    name="original_files_detail",
                ),
                url(
                    r"^computational_results/$",
                    ComputationalResultListView.as_view(),
                    name="results",
                ),
                url(
                    r"^computational_results/(?P<id>[0-9a-f-]+)/$",
                    ComputationalResultDetailView.as_view(),
                    name="results_detail",
                ),
                # Compendia
                url(r"^compendia/$", CompendiumResultListView.as_view(), name="compendium_results"),
                url(
                    r"^compendia/(?P<id>[0-9]+)/$",
                    CompendiumResultDetailView.as_view(),
                    name="compendium_result",
                ),
                # v1 api docs
                url(
                    r"^swagger/$",
                    schema_view.with_ui("swagger", cache_timeout=0),
                    name="schema_swagger_ui",
                ),
                url(r"^$", schema_view.with_ui("redoc", cache_timeout=0), name="schema_redoc"),
            ]
        ),
    ),
    # Admin
    url(r"^admin/", admin.site.urls),
    # Redirect root urls to latest version api docs
    url(r"^swagger/$", RedirectView.as_view(url="/v1/swagger")),
    url(r"^$", RedirectView.as_view(url="/v1")),
]

# handle errors
# Django discovers these module-level names automatically when this is the ROOT_URLCONF.
handler404 = handle404error
handler500 = handle500error
<|code_end|>
api/data_refinery_api/views/__init__.py
<|code_start|>from data_refinery_api.views.api_token import APITokenView
from data_refinery_api.views.compendium_result import (
CompendiumResultDetailView,
CompendiumResultListView,
)
from data_refinery_api.views.computational_result import (
ComputationalResultDetailView,
ComputationalResultListView,
)
from data_refinery_api.views.computed_file import ComputedFileDetailView, ComputedFileListView
from data_refinery_api.views.dataset import DatasetView
from data_refinery_api.views.experiment import ExperimentDetailView, ExperimentListView
from data_refinery_api.views.experiment_document import ExperimentDocumentView
from data_refinery_api.views.institution import InstitutionListView
from data_refinery_api.views.jobs import (
DownloaderJobDetailView,
DownloaderJobListView,
ProcessorJobDetailView,
ProcessorJobListView,
SurveyJobDetailView,
SurveyJobListView,
)
from data_refinery_api.views.organism import OrganismDetailView, OrganismListView
from data_refinery_api.views.original_file import OriginalFileDetailView, OriginalFileListView
from data_refinery_api.views.platform import PlatformListView
from data_refinery_api.views.processor import ProcessorDetailView, ProcessorListView
from data_refinery_api.views.qn_targets import QNTargetsAvailable, QNTargetsDetailView
from data_refinery_api.views.sample import SampleDetailView, SampleListView
from data_refinery_api.views.stats import (
AboutStats,
FailedDownloaderJobStats,
FailedProcessorJobStats,
Stats,
)
from data_refinery_api.views.transcriptome_index import (
TranscriptomeIndexDetailView,
TranscriptomeIndexListView,
)
<|code_end|>
api/data_refinery_api/views/api_token.py
<|code_start|>##
# Contains CreateAPITokenView, APITokenView, and needed serializer
##
from django.utils.decorators import method_decorator
from rest_framework import mixins, serializers, viewsets
from drf_yasg.utils import swagger_auto_schema
from data_refinery_common.models import APIToken
class APITokenSerializer(serializers.ModelSerializer):
    """Serializer for APIToken: exposes the token id, activation state, and terms text."""

    class Meta:
        model = APIToken
        fields = ("id", "is_activated", "terms_and_conditions")
        extra_kwargs = {
            # The id is server-generated; the only client-writable field is
            # `is_activated` (flipping it records agreement with the terms).
            "id": {"read_only": True},
            "is_activated": {"read_only": False},
            "terms_and_conditions": {"read_only": True},
        }
# The swagger_auto_schema decorators below only affect the generated API
# documentation for each viewset action; they do not change behavior.
@method_decorator(
    name="create",
    decorator=swagger_auto_schema(
        operation_description="""
This endpoint can be used to create and activate tokens. These tokens can be used
in requests that provide urls to download computed files. Setting `is_activated` to
true indicates agreement with refine.bio's [Terms of Use](https://www.refine.bio/terms)
and [Privacy Policy](https://www.refine.bio/privacy).

```py
import requests
import json

response = requests.post('https://api.refine.bio/v1/token/')
token_id = response.json()['id']
response = requests.put('https://api.refine.bio/v1/token/' + token_id + '/', json.dumps({'is_activated': True}), headers={'Content-Type': 'application/json'})
```

The token id needs to be provided in the HTTP request in the API-KEY header.

References
- [https://github.com/AlexsLemonade/refinebio/issues/731]()
- [https://github.com/AlexsLemonade/refinebio-frontend/issues/560]()
"""
    ),
)
@method_decorator(
    name="retrieve",
    decorator=swagger_auto_schema(operation_description="Return details about a specific token.",),
)
@method_decorator(
    name="update",
    decorator=swagger_auto_schema(
        operation_description="""
This can be used to activate a specific token by sending `is_activated: true`.
Setting `is_activated` to true indicates agreement with refine.bio's
[Terms of Use](https://www.refine.bio/terms) and
[Privacy Policy](https://www.refine.bio/privacy).
"""
    ),
)
class APITokenView(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """
    Create, read, and modify Api Tokens.
    """

    # NOTE(review): `model` is unused by DRF viewsets; `queryset` drives lookup.
    model = APIToken
    lookup_field = "id"
    queryset = APIToken.objects.all()
    serializer_class = APITokenSerializer
<|code_end|>
|
Dataset validation seems to be broken.
### Context
I saw a couple of slackbot messages about datasets with no smashable files.
### Problem or idea
Dataset ids:
* a71c681e-7dab-4d99-93c7-c43382ef0c72
* 1fd5bca4-c322-4433-ac65-d5d8f616d679
I only looked into 1fd5bca4-c322-4433-ac65-d5d8f616d679 but it had no email address and one of the experiments didn't even exist in our database at all.
### Solution or next step
Figure out how these datasets were able to be created and prevent it from happening.
| api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains DatasetView
##
from collections import defaultdict
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from rest_framework import mixins, serializers, viewsets
from rest_framework.exceptions import APIException
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
Dataset,
DatasetAnnotation,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
)
logger = get_and_configure_logger(__name__)
def get_client_ip(request):
    """Best-effort client IP: the first X-Forwarded-For hop, else REMOTE_ADDR."""
    forwarded = request.META.get("HTTP_X_FORWARDED_FOR")
    if forwarded:
        # Behind proxies, the leftmost entry is the originating client.
        return forwarded.split(",")[0]
    return request.META.get("REMOTE_ADDR", "")
def experiment_has_downloadable_samples(experiment, quant_sf_only=False):
    """Return True when `experiment` (an accession code) is public and has at
    least one downloadable sample.

    For quant.sf-only datasets a sample only counts as downloadable when it
    has an uploaded quant.sf computed file; otherwise the experiment merely
    has to be public and processed.
    """
    if not quant_sf_only:
        try:
            Experiment.processed_public_objects.get(accession_code=experiment)
        except Experiment.DoesNotExist:
            return False
        return True

    try:
        experiment = Experiment.public_objects.get(accession_code=experiment)
    except Experiment.DoesNotExist:
        return False
    downloadable_samples = experiment.sample_set.filter(
        # We only want samples with a quant.sf file associated with them
        results__computedfile__filename="quant.sf",
        results__computedfile__s3_key__isnull=False,
        results__computedfile__s3_bucket__isnull=False,
    )
    return downloadable_samples.count() > 0
def validate_dataset(data):
    """
    Dataset validation. Each experiment should always have at least one
    sample, every requested sample accession must actually exist and be
    downloadable, and when starting the smasher there should be at least
    one experiment.

    Raises serializers.ValidationError describing the first problem found.
    """
    if data.get("data") is None or type(data["data"]) != dict:
        raise serializers.ValidationError("`data` must be a dict of lists.")
    if data.get("start") and len(data["data"]) == 0:
        raise serializers.ValidationError("`data` must contain at least one experiment..")
    accessions = []
    non_downloadable_experiments = []
    for key, value in data["data"].items():
        if type(value) != list:
            raise serializers.ValidationError(
                "`data` must be a dict of lists. Problem with `" + str(key) + "`"
            )
        if len(value) < 1:
            raise serializers.ValidationError(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )
        if len(value) != len(set(value)):
            raise serializers.ValidationError("Duplicate values detected in " + str(value))
        # If they want "ALL", just make sure that the experiment has at least one downloadable sample
        if value == ["ALL"]:
            if not experiment_has_downloadable_samples(
                key, quant_sf_only=data.get("quant_sf_only", False)
            ):
                non_downloadable_experiments.append(key)
        # Otherwise, we will check that all the samples they requested are downloadable
        else:
            accessions.extend(value)
    if len(non_downloadable_experiments) != 0:
        raise serializers.ValidationError(
            {
                "message": "Experiment(s) in dataset have zero downloadable samples. See `non_downloadable_experiments` for a full list",
                "non_downloadable_experiments": non_downloadable_experiments,
            }
        )
    if len(accessions) == 0:
        return
    # BUG FIX: accession codes that did not exist at all used to slip through
    # validation, because the downstream filters simply returned fewer rows.
    # That allowed datasets referencing unknown samples/experiments to be
    # created. Reject unknown accessions explicitly.
    samples = Sample.public_objects.filter(accession_code__in=accessions)
    if samples.count() != len(accessions):
        raise serializers.ValidationError(
            {
                "message": "Sample(s) in dataset do not exist on refine.bio. See `non_downloadable_samples` for a full list",
                "non_downloadable_samples": list(
                    set(accessions) - set(s.accession_code for s in samples)
                ),
            },
        )
    if data.get("quant_sf_only", False):
        samples_without_quant_sf = samples.exclude(
            # Exclude samples that have at least one uploaded quant.sf file associated with them
            results__computedfile__filename="quant.sf",
            results__computedfile__s3_key__isnull=False,
            results__computedfile__s3_bucket__isnull=False,
        )
        if samples_without_quant_sf.count() > 0:
            raise serializers.ValidationError(
                {
                    "message": "Sample(s) in dataset are missing quant.sf files. See `non_downloadable_samples` for a full list",
                    "non_downloadable_samples": [
                        s.accession_code for s in samples_without_quant_sf
                    ],
                },
            )
    else:
        unprocessed_samples = samples.exclude(is_processed=True)
        if unprocessed_samples.count() > 0:
            raise serializers.ValidationError(
                {
                    "message": "Non-downloadable sample(s) in dataset. See `non_downloadable_samples` for a full list",
                    "non_downloadable_samples": [s.accession_code for s in unprocessed_samples],
                }
            )
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
    """ This serializer contains all of the information about an experiment needed for the download
    page
    """

    # Read-only projection of Experiment.sample_metadata_fields.
    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")

    class Meta:
        model = Experiment
        fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
    """Serializer for Dataset objects.

    Field visibility is dynamic: `experiments`, `organism_samples`, and
    `worker_version` only appear when `?details=true` is passed, and
    `download_url` only appears when an activated API token is present in
    the serializer context (the token lookup happens in the view).
    """

    # `start` is not a model field; it is a flag the frontend sends to trigger
    # dispatching the smasher job (popped again in `create`).
    start = serializers.NullBooleanField(required=False)
    experiments = DatasetDetailsExperimentSerializer(
        source="get_experiments", many=True, read_only=True
    )
    organism_samples = serializers.SerializerMethodField(read_only=True)
    worker_version = serializers.SerializerMethodField(read_only=True)

    def __init__(self, *args, **kwargs):
        super(DatasetSerializer, self).__init__(*args, **kwargs)

        if "context" in kwargs:
            if "request" in kwargs["context"]:
                # only include the fields `experiments` and `organism_samples` when the param `?details=true`
                # is provided. This is used on the frontend to render the downloads page
                # thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
                if "details" not in kwargs["context"]["request"].query_params:
                    self.fields.pop("experiments")
                    self.fields.pop("organism_samples")
                    self.fields.pop("worker_version")

            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    def create(self, validated_data):
        # "start" isn't actually a field on the Dataset model, we just use it
        # on the frontend to control when the dataset gets dispatched
        if "start" in validated_data:
            validated_data.pop("start")
        return super(DatasetSerializer, self).create(validated_data)

    class Meta:
        model = Dataset
        fields = (
            "id",
            "data",
            "aggregate_by",
            "scale_by",
            "is_processing",
            "is_processed",
            "is_available",
            "has_email",
            "email_address",
            "email_ccdl_ok",
            "expires_on",
            "s3_bucket",
            "s3_key",
            "success",
            "failure_reason",
            "created_at",
            "last_modified",
            "start",
            "size_in_bytes",
            "sha1",
            "experiments",
            "organism_samples",
            "download_url",
            "quantile_normalize",
            "quant_sf_only",
            "svd_algorithm",
            "worker_version",
        )
        extra_kwargs = {
            "data": {"required": True,},
            "id": {"read_only": True,},
            "is_processing": {"read_only": True,},
            "is_processed": {"read_only": True,},
            "is_available": {"read_only": True,},
            # Email fields are write-only so addresses never leak in responses.
            "email_address": {"required": False, "write_only": True},
            "email_ccdl_ok": {"required": False, "write_only": True},
            "expires_on": {"read_only": True,},
            "s3_bucket": {"read_only": True,},
            "s3_key": {"read_only": True,},
            "success": {"read_only": True,},
            "failure_reason": {"read_only": True,},
            "created_at": {"read_only": True,},
            "last_modified": {"read_only": True,},
            "size_in_bytes": {"read_only": True,},
            "sha1": {"read_only": True,},
            "download_url": {"read_only": True,},
            "worker_version": {
                "read_only": True,
                "help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
            },
        }

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        validate_dataset(data)
        return data

    def get_organism_samples(self, obj):
        """
        Groups the sample accession codes inside a dataset by their organisms, eg:
        { HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
        Useful to avoid sending sample information on the downloads page
        """
        samples = (
            obj.get_samples()
            .prefetch_related("organism")
            .values("organism__name", "accession_code")
            .order_by("organism__name", "accession_code")
        )

        result = defaultdict(list)
        for sample in samples:
            result[sample["organism__name"]].append(sample["accession_code"])

        return result

    def get_worker_version(self, obj):
        # Worker version of the most recently created processor job, or None
        # when the dataset has never been dispatched.
        processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
            "worker_version", flat=True
        )
        if processor_jobs:
            return processor_jobs[0]
        else:
            return None
@method_decorator(
    name="retrieve",
    decorator=swagger_auto_schema(
        operation_description="View a single Dataset.",
        manual_parameters=[
            openapi.Parameter(
                name="details",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_BOOLEAN,
                description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
            )
        ],
    ),
)
@method_decorator(
    name="update",
    decorator=swagger_auto_schema(
        operation_description="""
Modify an existing Dataset.

In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.

```py
import requests
import json

params = json.dumps({
    'data': data,
    'aggregate_by': 'EXPERIMENT',
    'start': True,
    'email_address': 'refinebio@gmail.com'
})
headers = {
    'Content-Type': 'application/json',
    'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
    ),
)
class DatasetView(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """ View and modify a single Dataset. """

    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = "id"

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the activated APIToken (when the API-KEY header is valid) so the
        serializer can expose `download_url`.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, "token": token}
        except (APIToken.DoesNotExist, ValidationError):
            # Invalid or absent token: `download_url` is simply omitted.
            return serializer_context

    def validate_token(self):
        """Raise a ValidationError unless the request carries an activated API token."""
        # Make sure we have a valid activated token.
        token_id = self.request.data.get("token_id", None)

        if not token_id:
            token_id = self.request.META.get("HTTP_API_KEY", None)

        try:
            APIToken.objects.get(id=token_id, is_activated=True)
        except (APIToken.DoesNotExist, ValidationError):
            raise serializers.ValidationError("You must provide an active API token ID")

    @staticmethod
    def convert_ALL_to_accessions(data):
        """Expand `["ALL"]` experiment entries into the experiment's processed,
        QN-target-organism sample accession codes (in place)."""
        qn_organisms = Organism.get_objects_with_qn_targets()
        for key in data["data"].keys():
            accessions = data["data"][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(
                    experiment.samples.filter(
                        is_processed=True, organism__in=qn_organisms
                    ).values_list("accession_code", flat=True)
                )
                data["data"][key] = sample_codes

    def validate_email_address_is_nonempty(self):
        """Check to make sure a usable email exists. We call this when getting ready
        to dispatch a dataset.

        BUG FIX: only `None` used to be rejected here, so an empty or
        whitespace-only `email_address` slipped through and datasets could be
        dispatched with no way to notify the requester.
        """
        supplied_email_address = self.request.data.get("email_address", None)
        if supplied_email_address is None or not str(supplied_email_address).strip():
            raise serializers.ValidationError("You must provide an email address.")

    def dispatch_job(self, serializer, obj):
        """Create a SMASHER processor job for `obj`, queue it, and mark the
        dataset as processing. Raises APIException when the job cannot be queued."""
        processor_job = ProcessorJob()
        processor_job.pipeline_applied = "SMASHER"
        processor_job.ram_amount = 4096
        processor_job.save()

        pjda = ProcessorJobDatasetAssociation()
        pjda.processor_job = processor_job
        pjda.dataset = obj
        pjda.save()

        job_sent = False
        try:
            # Hidden method of non-dispatching for testing purposes.
            if not self.request.data.get("no_send_job", False):
                job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
            else:
                # We didn't actually send it, but we also didn't want to.
                job_sent = True
        except Exception as e:
            # Just log whatever exception happens, because the foreman wil requeue the job anyway
            logger.error(e)

        if not job_sent:
            raise APIException(
                "Unable to queue download job. Something has gone"
                " wrong and we have been notified about it."
            )

        serializer.validated_data["is_processing"] = True
        obj = serializer.save()

        # create a new dataset annotation with the information of this request
        annotation = DatasetAnnotation()
        annotation.dataset = obj
        annotation.data = {
            "start": True,
            "ip": get_client_ip(self.request),
            "user_agent": self.request.META.get("HTTP_USER_AGENT", None),
        }
        annotation.save()

    def create_or_update(self, serializer):
        """ If `start` is set, fire off the job. Otherwise just create/update the dataset"""
        data = serializer.validated_data

        DatasetView.convert_ALL_to_accessions(data)

        if data.get("start"):
            # Dispatching requires both an activated token and a real email.
            self.validate_token()
            self.validate_email_address_is_nonempty()
            obj = serializer.save()
            self.dispatch_job(serializer, obj)
        else:
            serializer.save()

    def perform_create(self, serializer):
        # Since we are creating a new dataset, there is no way it is already processed
        self.create_or_update(serializer)

    def perform_update(self, serializer):
        # Check to make sure we have not already processed the dataset
        old_object = self.get_object()
        if old_object.is_processed:
            raise serializers.ValidationError(
                "You may not update Datasets which have already been processed"
            )
        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        elif old_object.is_processing:
            self.validate_email_address_is_nonempty()
            serializer.validated_data["data"] = old_object.data
            serializer.validated_data["aggregate_by"] = old_object.aggregate_by
            serializer.save()
        else:
            self.create_or_update(serializer)
<|code_end|>
| api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains DatasetView
##
from collections import defaultdict
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from rest_framework import filters, generics, mixins, serializers, viewsets
from rest_framework.exceptions import APIException
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
Dataset,
DatasetAnnotation,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
)
logger = get_and_configure_logger(__name__)
def get_client_ip(request):
    """Best-effort client IP: the first X-Forwarded-For hop, else REMOTE_ADDR."""
    forwarded = request.META.get("HTTP_X_FORWARDED_FOR")
    if forwarded:
        # Behind proxies, the leftmost entry is the originating client.
        return forwarded.split(",")[0]
    return request.META.get("REMOTE_ADDR", "")
def experiment_has_downloadable_samples(experiment, quant_sf_only=False):
    """Return True when `experiment` (an accession code) is public and has at
    least one downloadable sample.

    For quant.sf-only datasets a sample only counts as downloadable when it
    has an uploaded quant.sf computed file; otherwise the experiment merely
    has to be public and processed.
    """
    if not quant_sf_only:
        try:
            Experiment.processed_public_objects.get(accession_code=experiment)
        except Experiment.DoesNotExist:
            return False
        return True

    try:
        experiment = Experiment.public_objects.get(accession_code=experiment)
    except Experiment.DoesNotExist:
        return False
    downloadable_samples = experiment.sample_set.filter(
        # We only want samples with a quant.sf file associated with them
        results__computedfile__filename="quant.sf",
        results__computedfile__s3_key__isnull=False,
        results__computedfile__s3_bucket__isnull=False,
    )
    return downloadable_samples.count() > 0
def validate_dataset(data):
    """
    Dataset validation. Each experiment should always have at least one
    sample, all samples should be downloadable, and when starting the smasher
    there should be at least one experiment.

    Raises serializers.ValidationError describing the first problem found;
    the check order below is part of the API's observable error behavior.
    """
    if data.get("data") is None or type(data["data"]) != dict:
        raise serializers.ValidationError("`data` must be a dict of lists.")

    if data.get("start") and len(data["data"]) == 0:
        raise serializers.ValidationError("`data` must contain at least one experiment..")

    accessions = []
    non_downloadable_experiments = []
    for key, value in data["data"].items():
        if type(value) != list:
            raise serializers.ValidationError(
                "`data` must be a dict of lists. Problem with `" + str(key) + "`"
            )

        if len(value) < 1:
            raise serializers.ValidationError(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )

        if len(value) != len(set(value)):
            raise serializers.ValidationError("Duplicate values detected in " + str(value))

        # If they want "ALL", just make sure that the experiment has at least one downloadable sample
        if value == ["ALL"]:
            if not experiment_has_downloadable_samples(
                key, quant_sf_only=data.get("quant_sf_only", False)
            ):
                non_downloadable_experiments.append(key)
        # Otherwise, we will check that all the samples they requested are downloadable
        else:
            accessions.extend(value)

    if len(non_downloadable_experiments) != 0:
        raise serializers.ValidationError(
            {
                "message": "Experiment(s) in dataset have zero downloadable samples. See `non_downloadable_experiments` for a full list",
                "non_downloadable_experiments": non_downloadable_experiments,
            }
        )

    if len(accessions) == 0:
        return

    # Reject accession codes that don't exist at all — otherwise the filters
    # below would just return fewer rows and unknown samples would slip through.
    samples = Sample.public_objects.filter(accession_code__in=accessions)
    if samples.count() != len(accessions):
        raise serializers.ValidationError(
            {
                "message": "Sample(s) in dataset do not exist on refine.bio. See `non_downloadable_samples` for a full list",
                "non_downloadable_samples": list(
                    set(accessions) - set(s.accession_code for s in samples)
                ),
            },
        )

    if data.get("quant_sf_only", False):
        samples_without_quant_sf = samples.exclude(
            # Exclude samples that have at least one uploaded quant.sf file associated with them
            results__computedfile__filename="quant.sf",
            results__computedfile__s3_key__isnull=False,
            results__computedfile__s3_bucket__isnull=False,
        )
        if samples_without_quant_sf.count() > 0:
            raise serializers.ValidationError(
                {
                    "message": "Sample(s) in dataset are missing quant.sf files. See `non_downloadable_samples` for a full list",
                    "non_downloadable_samples": [
                        s.accession_code for s in samples_without_quant_sf
                    ],
                },
            )
    else:
        unprocessed_samples = samples.exclude(is_processed=True)
        if unprocessed_samples.count() > 0:
            raise serializers.ValidationError(
                {
                    "message": "Non-downloadable sample(s) in dataset. See `non_downloadable_samples` for a full list",
                    "non_downloadable_samples": [s.accession_code for s in unprocessed_samples],
                }
            )
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
    """ This serializer contains all of the information about an experiment needed for the download
    page
    """

    # Read-only projection of Experiment.sample_metadata_fields.
    sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")

    class Meta:
        model = Experiment
        fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
    """Serializer for Dataset objects.

    Field visibility is dynamic: `experiments`, `organism_samples`, and
    `worker_version` only appear when `?details=true` is passed, and
    `download_url` only appears when an activated API token is present in
    the serializer context (the token lookup happens in the view).
    """

    # `start` is not a model field; it is a flag the frontend sends to trigger
    # dispatching the smasher job (popped again in `create`).
    start = serializers.NullBooleanField(required=False)
    experiments = DatasetDetailsExperimentSerializer(
        source="get_experiments", many=True, read_only=True
    )
    organism_samples = serializers.SerializerMethodField(read_only=True)
    worker_version = serializers.SerializerMethodField(read_only=True)

    def __init__(self, *args, **kwargs):
        super(DatasetSerializer, self).__init__(*args, **kwargs)

        if "context" in kwargs:
            if "request" in kwargs["context"]:
                # only include the fields `experiments` and `organism_samples` when the param `?details=true`
                # is provided. This is used on the frontend to render the downloads page
                # thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
                if "details" not in kwargs["context"]["request"].query_params:
                    self.fields.pop("experiments")
                    self.fields.pop("organism_samples")
                    self.fields.pop("worker_version")

            # only include the field `download_url` if a valid token is specified
            # the token lookup happens in the view.
            if "token" not in kwargs["context"]:
                self.fields.pop("download_url")

    def create(self, validated_data):
        # "start" isn't actually a field on the Dataset model, we just use it
        # on the frontend to control when the dataset gets dispatched
        if "start" in validated_data:
            validated_data.pop("start")
        return super(DatasetSerializer, self).create(validated_data)

    class Meta:
        model = Dataset
        fields = (
            "id",
            "data",
            "aggregate_by",
            "scale_by",
            "is_processing",
            "is_processed",
            "is_available",
            "has_email",
            "email_address",
            "email_ccdl_ok",
            "expires_on",
            "s3_bucket",
            "s3_key",
            "success",
            "failure_reason",
            "created_at",
            "last_modified",
            "start",
            "size_in_bytes",
            "sha1",
            "experiments",
            "organism_samples",
            "download_url",
            "quantile_normalize",
            "quant_sf_only",
            "svd_algorithm",
            "worker_version",
        )
        extra_kwargs = {
            "data": {"required": True,},
            "id": {"read_only": True,},
            "is_processing": {"read_only": True,},
            "is_processed": {"read_only": True,},
            "is_available": {"read_only": True,},
            # Email fields are write-only so addresses never leak in responses.
            "email_address": {"required": False, "write_only": True},
            "email_ccdl_ok": {"required": False, "write_only": True},
            "expires_on": {"read_only": True,},
            "s3_bucket": {"read_only": True,},
            "s3_key": {"read_only": True,},
            "success": {"read_only": True,},
            "failure_reason": {"read_only": True,},
            "created_at": {"read_only": True,},
            "last_modified": {"read_only": True,},
            "size_in_bytes": {"read_only": True,},
            "sha1": {"read_only": True,},
            "download_url": {"read_only": True,},
            "worker_version": {
                "read_only": True,
                "help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
            },
        }

    def validate(self, data):
        """
        Ensure this is something we want in our dataset.
        """
        validate_dataset(data)
        return data

    def get_organism_samples(self, obj):
        """
        Groups the sample accession codes inside a dataset by their organisms, eg:
        { HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
        Useful to avoid sending sample information on the downloads page
        """
        samples = (
            obj.get_samples()
            .prefetch_related("organism")
            .values("organism__name", "accession_code")
            .order_by("organism__name", "accession_code")
        )

        result = defaultdict(list)
        for sample in samples:
            result[sample["organism__name"]].append(sample["accession_code"])

        return result

    def get_worker_version(self, obj):
        # Worker version of the most recently created processor job, or None
        # when the dataset has never been dispatched.
        processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
            "worker_version", flat=True
        )
        if processor_jobs:
            return processor_jobs[0]
        else:
            return None
@method_decorator(
name="retrieve",
decorator=swagger_auto_schema(
operation_description="View a single Dataset.",
manual_parameters=[
openapi.Parameter(
name="details",
in_=openapi.IN_QUERY,
type=openapi.TYPE_BOOLEAN,
description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
)
],
),
)
@method_decorator(
name="update",
decorator=swagger_auto_schema(
operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
'data': data,
'aggregate_by': 'EXPERIMENT',
'start': True,
'email_address': 'refinebio@gmail.com'
})
headers = {
'Content-Type': 'application/json',
'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
),
)
class DatasetView(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """ View and modify a single Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = "id"
    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.

        Adds the caller's activated APIToken (looked up from the API-KEY
        header) to the context when one exists; otherwise returns the
        default context unchanged.
        """
        serializer_context = super(DatasetView, self).get_serializer_context()
        token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            token = APIToken.objects.get(id=token_id, is_activated=True)
            return {**serializer_context, "token": token}
        except (APIToken.DoesNotExist, ValidationError):
            return serializer_context
    def validate_token(self):
        """Raise a ValidationError unless the request carries an activated
        API token (body field ``token_id`` or API-KEY header)."""
        # Make sure we have a valid activated token.
        token_id = self.request.data.get("token_id", None)
        if not token_id:
            token_id = self.request.META.get("HTTP_API_KEY", None)
        try:
            APIToken.objects.get(id=token_id, is_activated=True)
        except (APIToken.DoesNotExist, ValidationError):
            raise serializers.ValidationError("You must provide an active API token ID")
    @staticmethod
    def convert_ALL_to_accessions(data):
        """Expand the literal ["ALL"] sample list of any experiment in
        ``data["data"]`` into the accession codes of all of its processed
        samples whose organism has a QN target. Mutates ``data`` in place.
        """
        qn_organisms = Organism.get_objects_with_qn_targets()
        for key in data["data"].keys():
            accessions = data["data"][key]
            if accessions == ["ALL"]:
                experiment = get_object_or_404(Experiment, accession_code=key)
                sample_codes = list(
                    experiment.samples.filter(
                        is_processed=True, organism__in=qn_organisms
                    ).values_list("accession_code", flat=True)
                )
                data["data"][key] = sample_codes
    def validate_email_address_is_nonempty(self):
        """Check to make sure the email exists. We call this when getting ready to dispatch a dataset"""
        supplied_email_address = self.request.data.get("email_address", None)
        if supplied_email_address is None or supplied_email_address == "":
            raise serializers.ValidationError("You must provide an email address.")
    def dispatch_job(self, serializer, obj):
        """Create a SMASHER ProcessorJob for dataset ``obj``, queue it, mark
        the dataset as processing, and record a DatasetAnnotation with the
        requester's IP and user agent. Raises APIException if queueing fails.
        """
        processor_job = ProcessorJob()
        processor_job.pipeline_applied = "SMASHER"
        processor_job.ram_amount = 4096
        processor_job.save()
        pjda = ProcessorJobDatasetAssociation()
        pjda.processor_job = processor_job
        pjda.dataset = obj
        pjda.save()
        job_sent = False
        try:
            # Hidden method of non-dispatching for testing purposes.
            if not self.request.data.get("no_send_job", False):
                job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
            else:
                # We didn't actually send it, but we also didn't want to.
                job_sent = True
        except Exception as e:
            # Just log whatever exception happens, because the foreman will requeue the job anyway
            logger.error(e)
        if not job_sent:
            raise APIException(
                "Unable to queue download job. Something has gone"
                " wrong and we have been notified about it."
            )
        serializer.validated_data["is_processing"] = True
        obj = serializer.save()
        # create a new dataset annotation with the information of this request
        annotation = DatasetAnnotation()
        annotation.dataset = obj
        annotation.data = {
            "start": True,
            "ip": get_client_ip(self.request),
            "user_agent": self.request.META.get("HTTP_USER_AGENT", None),
        }
        annotation.save()
    def create_or_update(self, serializer):
        """ If `start` is set, fire off the job. Otherwise just create/update the dataset"""
        data = serializer.validated_data
        DatasetView.convert_ALL_to_accessions(data)
        if data.get("start"):
            self.validate_token()
            self.validate_email_address_is_nonempty()
            obj = serializer.save()
            self.dispatch_job(serializer, obj)
        else:
            serializer.save()
    def perform_create(self, serializer):
        # Since we are creating a new dataset, there is no way it is already processed
        self.create_or_update(serializer)
    def perform_update(self, serializer):
        """Update a dataset, refusing changes to processed datasets and
        restricting in-flight (is_processing) datasets to email updates only.
        """
        # Check to make sure we have not already processed the dataset
        old_object = self.get_object()
        if old_object.is_processed:
            raise serializers.ValidationError(
                "You may not update Datasets which have already been processed"
            )
        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        elif old_object.is_processing:
            self.validate_email_address_is_nonempty()
            serializer.validated_data["data"] = old_object.data
            serializer.validated_data["aggregate_by"] = old_object.aggregate_by
            serializer.save()
        else:
            self.create_or_update(serializer)
<|code_end|>
|
Find a way to assign volume IDs to instances without EBS
### Context
https://github.com/AlexsLemonade/refinebio/issues/2254
### Problem or idea
We stopped creating EBS volumes now that we do all of the processing on the SSDs, but now we can only run one instance at a time because instances are always assigned volume index 0.
### Solution or next step
Brainstorm ways to assign volume indices to instances. Maybe we can query nomad for active instances?
| common/data_refinery_common/migrations/0059_auto_20200803_1719.py
<|code_start|><|code_end|>
common/data_refinery_common/models/jobs/downloader_job.py
<|code_start|>from typing import Set
from django.db import models
from django.utils import timezone
from nomad import Nomad
from data_refinery_common.models.jobs.job_managers import (
FailedJobsManager,
HungJobsManager,
LostJobsManager,
)
from data_refinery_common.models.sample import Sample
from data_refinery_common.utils import get_env_variable
class DownloaderJob(models.Model):
    """Records information about running a Downloader."""
    class Meta:
        db_table = "downloader_jobs"
        indexes = [
            models.Index(
                fields=["created_at"],
                name="downloader_jobs_created_at",
                # condition=Q(success=None, retried=False, no_retry=False)
            ),
            models.Index(fields=["worker_id"]),
        ]
    # Managers
    objects = models.Manager()
    failed_objects = FailedJobsManager()
    hung_objects = HungJobsManager()
    lost_objects = LostJobsManager()
    # This field contains a string which corresponds to a valid
    # Downloader Task. Valid values are enumerated in:
    # data_refinery_common.job_lookup.Downloaders
    downloader_task = models.CharField(max_length=256)
    accession_code = models.CharField(max_length=256, blank=True, null=True)
    no_retry = models.BooleanField(default=False)
    original_files = models.ManyToManyField(
        "OriginalFile", through="DownloaderJobOriginalFileAssociation"
    )
    # Tracking
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    # Tri-state: stays None until an outcome has been recorded.
    success = models.NullBooleanField(null=True)
    nomad_job_id = models.CharField(max_length=256, null=True)
    # Resources
    ram_amount = models.IntegerField(default=1024)
    volume_index = models.CharField(max_length=3, null=True)
    # This field represents how many times this job has been
    # retried. It starts at 0 and each time the job has to be retried
    # it will be incremented.
    num_retries = models.IntegerField(default=0)
    # This field indicates whether or not this job has been retried
    # already or not.
    retried = models.BooleanField(default=False)
    # This point of this field is to identify which worker ran the
    # job. A few fields may actually be required or something other
    # than just an id.
    worker_id = models.CharField(max_length=256, null=True)
    # This field corresponds to the version number of the
    # data_refinery_workers project that was used to run the job.
    worker_version = models.CharField(max_length=128, null=True)
    # This field allows jobs to specify why they failed.
    failure_reason = models.TextField(null=True)
    # If the job is retried, this is the id of the new job
    retried_job = models.ForeignKey("self", on_delete=models.PROTECT, null=True)
    # If the job was recreated because the data it downloaded got
    # lost, deleted, or corrupted then this field will be true.
    # This helps prevent an infinite loop of DownloaderJob recreation.
    was_recreated = models.BooleanField(default=False)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def get_samples(self) -> Set[Sample]:
        """Return the set of all Samples attached to this job's original files."""
        samples = set()
        for original_file in self.original_files.all():
            for sample in original_file.samples.all():
                samples.add(sample)
        return samples
    def kill_nomad_job(self) -> bool:
        """Best-effort deregistration of this job's Nomad job.

        Returns False when the job has no Nomad id or the deregistration
        request fails, True when the request was issued successfully.
        """
        if not self.nomad_job_id:
            return False
        # Local import: the module's top level only does `from nomad import Nomad`.
        import nomad
        try:
            nomad_host = get_env_variable("NOMAD_HOST")
            nomad_port = get_env_variable("NOMAD_PORT", "4646")
            nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
            nomad_client.job.deregister_job(self.nomad_job_id)
        except nomad.api.exceptions.BaseNomadException:
            # Consistent with ProcessorJob.kill_nomad_job: a failed kill is
            # reported via the return value rather than a raised exception.
            # (The original had this try/except commented out, letting Nomad
            # errors propagate out of a method declared to return bool.)
            return False
        return True
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(DownloaderJob, self).save(*args, **kwargs)
    def __str__(self):
        return "DownloaderJob " + str(self.pk) + ": " + str(self.downloader_task)
<|code_end|>
common/data_refinery_common/models/jobs/processor_job.py
<|code_start|>from typing import Set
from django.db import models
from django.utils import timezone
import nomad
from nomad import Nomad
from data_refinery_common.models.jobs.job_managers import (
FailedJobsManager,
HungJobsManager,
LostJobsManager,
)
from data_refinery_common.models.sample import Sample
from data_refinery_common.utils import get_env_variable
class ProcessorJob(models.Model):
    """Records information about running a processor."""
    class Meta:
        db_table = "processor_jobs"
        indexes = [
            models.Index(
                fields=["created_at"],
                name="processor_jobs_created_at",
                # A partial index might be better here, given our queries we don't
                # need to index the whole table. We need to update to Django 2.2
                # for this to be supported.
                # condition=Q(success=None, retried=False, no_retry=False)
                # https://github.com/AlexsLemonade/refinebio/issues/1454
            ),
        ]
    # Managers
    objects = models.Manager()
    failed_objects = FailedJobsManager()
    hung_objects = HungJobsManager()
    lost_objects = LostJobsManager()
    # This field will contain an enumerated value specifying which
    # processor pipeline was applied during the processor job.
    pipeline_applied = models.CharField(max_length=256)
    original_files = models.ManyToManyField(
        "OriginalFile", through="ProcessorJobOriginalFileAssociation"
    )
    datasets = models.ManyToManyField("DataSet", through="ProcessorJobDataSetAssociation")
    no_retry = models.BooleanField(default=False)
    abort = models.BooleanField(default=False)
    # Resources
    ram_amount = models.IntegerField(default=2048)
    volume_index = models.CharField(max_length=3, null=True)
    # Tracking
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    # Tri-state: stays None until an outcome has been recorded.
    success = models.NullBooleanField(null=True)
    nomad_job_id = models.CharField(max_length=256, null=True)
    # This field represents how many times this job has been
    # retried. It starts at 0 and each time the job has to be retried
    # it will be incremented.
    num_retries = models.IntegerField(default=0)
    # This field indicates whether or not this job has been retried
    # already or not.
    retried = models.BooleanField(default=False)
    # This point of this field is to identify which worker ran the
    # job. A few fields may actually be required or something other
    # than just an id.
    worker_id = models.CharField(max_length=256, null=True)
    # This field corresponds to the version number of the
    # data_refinery_workers project that was used to run the job.
    worker_version = models.CharField(max_length=128, null=True)
    # This field allows jobs to specify why they failed.
    failure_reason = models.TextField(null=True)
    # If the job is retried, this is the id of the new job
    retried_job = models.ForeignKey("self", on_delete=models.SET_NULL, null=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def get_samples(self) -> Set[Sample]:
        """Return the set of all Samples attached to this job's original files."""
        samples = set()
        for original_file in self.original_files.all():
            for sample in original_file.samples.all():
                samples.add(sample)
        return samples
    def kill_nomad_job(self) -> bool:
        """Best-effort deregistration of this job's Nomad job.

        Returns False when the job has no Nomad id or the Nomad API call
        fails, True when the deregistration request was issued.
        """
        if not self.nomad_job_id:
            return False
        try:
            nomad_host = get_env_variable("NOMAD_HOST")
            nomad_port = get_env_variable("NOMAD_PORT", "4646")
            nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
            nomad_client.job.deregister_job(self.nomad_job_id)
        except nomad.api.exceptions.BaseNomadException:
            return False
        return True
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ProcessorJob, self).save(*args, **kwargs)
    def __str__(self):
        return "ProcessorJob " + str(self.pk) + ": " + str(self.pipeline_applied)
<|code_end|>
| common/data_refinery_common/migrations/0059_auto_20200803_1719.py
<|code_start|># Generated by Django 2.2.13 on 2020-08-03 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Widens volume_index on both job tables from 3 to 25 characters so it
    # can hold an EC2 instance id (e.g. "i-0aad2ec47c070fad1") instead of a
    # small numeric index.
    dependencies = [
        ("data_refinery_common", "0058_auto_20200625_1402"),
    ]
    operations = [
        migrations.AlterField(
            model_name="downloaderjob",
            name="volume_index",
            field=models.CharField(max_length=25, null=True),
        ),
        migrations.AlterField(
            model_name="processorjob",
            name="volume_index",
            field=models.CharField(max_length=25, null=True),
        ),
    ]
<|code_end|>
common/data_refinery_common/models/jobs/downloader_job.py
<|code_start|>from typing import Set
from django.db import models
from django.utils import timezone
from nomad import Nomad
from data_refinery_common.models.jobs.job_managers import (
FailedJobsManager,
HungJobsManager,
LostJobsManager,
)
from data_refinery_common.models.sample import Sample
from data_refinery_common.utils import get_env_variable
class DownloaderJob(models.Model):
    """Records information about running a Downloader."""
    class Meta:
        db_table = "downloader_jobs"
        indexes = [
            models.Index(
                fields=["created_at"],
                name="downloader_jobs_created_at",
                # condition=Q(success=None, retried=False, no_retry=False)
            ),
            models.Index(fields=["worker_id"]),
        ]
    # Managers
    objects = models.Manager()
    failed_objects = FailedJobsManager()
    hung_objects = HungJobsManager()
    lost_objects = LostJobsManager()
    # This field contains a string which corresponds to a valid
    # Downloader Task. Valid values are enumerated in:
    # data_refinery_common.job_lookup.Downloaders
    downloader_task = models.CharField(max_length=256)
    accession_code = models.CharField(max_length=256, blank=True, null=True)
    no_retry = models.BooleanField(default=False)
    original_files = models.ManyToManyField(
        "OriginalFile", through="DownloaderJobOriginalFileAssociation"
    )
    # Tracking
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    # Tri-state: stays None until an outcome has been recorded.
    success = models.NullBooleanField(null=True)
    nomad_job_id = models.CharField(max_length=256, null=True)
    # Resources
    ram_amount = models.IntegerField(default=1024)
    # The volume index is the instance id of an AWS EC2 machine. It looks like
    # these are 19 characters, but just to be safe we'll make the max length a
    # bit higher
    volume_index = models.CharField(max_length=25, null=True)
    # This field represents how many times this job has been
    # retried. It starts at 0 and each time the job has to be retried
    # it will be incremented.
    num_retries = models.IntegerField(default=0)
    # This field indicates whether or not this job has been retried
    # already or not.
    retried = models.BooleanField(default=False)
    # This point of this field is to identify which worker ran the
    # job. A few fields may actually be required or something other
    # than just an id.
    worker_id = models.CharField(max_length=256, null=True)
    # This field corresponds to the version number of the
    # data_refinery_workers project that was used to run the job.
    worker_version = models.CharField(max_length=128, null=True)
    # This field allows jobs to specify why they failed.
    failure_reason = models.TextField(null=True)
    # If the job is retried, this is the id of the new job
    retried_job = models.ForeignKey("self", on_delete=models.PROTECT, null=True)
    # If the job was recreated because the data it downloaded got
    # lost, deleted, or corrupted then this field will be true.
    # This helps prevent an infinite loop of DownloaderJob recreation.
    was_recreated = models.BooleanField(default=False)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def get_samples(self) -> Set[Sample]:
        """Return the set of all Samples attached to this job's original files."""
        samples = set()
        for original_file in self.original_files.all():
            for sample in original_file.samples.all():
                samples.add(sample)
        return samples
    def kill_nomad_job(self) -> bool:
        """Deregister this job's Nomad job; returns False when there is no
        Nomad job id to kill.

        NOTE(review): the try/except below is commented out, so unlike
        ProcessorJob.kill_nomad_job any Nomad API error propagates to the
        caller instead of returning False — confirm this is intended.
        """
        if not self.nomad_job_id:
            return False
        # try:
        nomad_host = get_env_variable("NOMAD_HOST")
        nomad_port = get_env_variable("NOMAD_PORT", "4646")
        nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
        nomad_client.job.deregister_job(self.nomad_job_id)
        # except:
        #     return False
        return True
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(DownloaderJob, self).save(*args, **kwargs)
    def __str__(self):
        return "DownloaderJob " + str(self.pk) + ": " + str(self.downloader_task)
<|code_end|>
common/data_refinery_common/models/jobs/processor_job.py
<|code_start|>from typing import Set
from django.db import models
from django.utils import timezone
import nomad
from nomad import Nomad
from data_refinery_common.models.jobs.job_managers import (
FailedJobsManager,
HungJobsManager,
LostJobsManager,
)
from data_refinery_common.models.sample import Sample
from data_refinery_common.utils import get_env_variable
class ProcessorJob(models.Model):
    """Records information about running a processor."""
    class Meta:
        db_table = "processor_jobs"
        indexes = [
            models.Index(
                fields=["created_at"],
                name="processor_jobs_created_at",
                # A partial index might be better here, given our queries we don't
                # need to index the whole table. We need to update to Django 2.2
                # for this to be supported.
                # condition=Q(success=None, retried=False, no_retry=False)
                # https://github.com/AlexsLemonade/refinebio/issues/1454
            ),
        ]
    # Managers
    objects = models.Manager()
    failed_objects = FailedJobsManager()
    hung_objects = HungJobsManager()
    lost_objects = LostJobsManager()
    # This field will contain an enumerated value specifying which
    # processor pipeline was applied during the processor job.
    pipeline_applied = models.CharField(max_length=256)
    original_files = models.ManyToManyField(
        "OriginalFile", through="ProcessorJobOriginalFileAssociation"
    )
    datasets = models.ManyToManyField("DataSet", through="ProcessorJobDataSetAssociation")
    no_retry = models.BooleanField(default=False)
    abort = models.BooleanField(default=False)
    # Resources
    ram_amount = models.IntegerField(default=2048)
    # The volume index is the instance id of an AWS EC2 machine. It looks like
    # these are 19 characters, but just to be safe we'll make the max length a
    # bit higher
    volume_index = models.CharField(max_length=25, null=True)
    # Tracking
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    # Tri-state: stays None until an outcome has been recorded.
    success = models.NullBooleanField(null=True)
    nomad_job_id = models.CharField(max_length=256, null=True)
    # This field represents how many times this job has been
    # retried. It starts at 0 and each time the job has to be retried
    # it will be incremented.
    num_retries = models.IntegerField(default=0)
    # This field indicates whether or not this job has been retried
    # already or not.
    retried = models.BooleanField(default=False)
    # This point of this field is to identify which worker ran the
    # job. A few fields may actually be required or something other
    # than just an id.
    worker_id = models.CharField(max_length=256, null=True)
    # This field corresponds to the version number of the
    # data_refinery_workers project that was used to run the job.
    worker_version = models.CharField(max_length=128, null=True)
    # This field allows jobs to specify why they failed.
    failure_reason = models.TextField(null=True)
    # If the job is retried, this is the id of the new job
    retried_job = models.ForeignKey("self", on_delete=models.SET_NULL, null=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)
    def get_samples(self) -> Set[Sample]:
        """Return the set of all Samples attached to this job's original files."""
        samples = set()
        for original_file in self.original_files.all():
            for sample in original_file.samples.all():
                samples.add(sample)
        return samples
    def kill_nomad_job(self) -> bool:
        """Best-effort deregistration of this job's Nomad job.

        Returns False when the job has no Nomad id or the Nomad API call
        fails, True when the deregistration request was issued.
        """
        if not self.nomad_job_id:
            return False
        try:
            nomad_host = get_env_variable("NOMAD_HOST")
            nomad_port = get_env_variable("NOMAD_PORT", "4646")
            nomad_client = Nomad(nomad_host, port=int(nomad_port), timeout=30)
            nomad_client.job.deregister_job(self.nomad_job_id)
        except nomad.api.exceptions.BaseNomadException:
            return False
        return True
    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(ProcessorJob, self).save(*args, **kwargs)
    def __str__(self):
        return "ProcessorJob " + str(self.pk) + ": " + str(self.pipeline_applied)
<|code_end|>
|
Don't fail when detecting column header for illumina sample
### Context
We have `4287` samples that failed because we weren't able to find an id column in their headers. For example for sample [GSM2677584](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSM2677584) we failed with the error:
`Could not find any ID column in headers ['ID_REF ', 'SAMPLE 1', 'Detection Pval', 'SAMPLE 2', 'Detection Pval', 'SAMPLE 3', 'Detection Pval', 'SAMPLE 4', 'Detection Pval', 'SAMPLE 5', 'Detection Pval', 'SAMPLE 6', 'Detection Pval', 'SAMPLE 7', 'Detection Pval', 'SAMPLE 8', 'Detection Pval', 'SAMPLE 9', 'Detection Pval', 'SAMPLE 10', 'Detection Pval', 'SAMPLE 11', 'Detection Pval', 'SAMPLE 12', 'Detection Pval', 'SAMPLE 13', 'Detection Pval', 'SAMPLE 14', 'Detection Pval', 'SAMPLE 15', 'Detection Pval', 'SAMPLE 16', 'Detection Pval', 'SAMPLE 17', 'Detection Pval', 'SAMPLE 18', 'Detection Pval'] for file /home/user/data_store/processor_job_1310904/GSE100301_non-normalized.txt.sanitized`
https://github.com/AlexsLemonade/refinebio/issues/86#issuecomment-379308817
### Problem or idea
It seems like we are not parsing the first column correctly: there's trailing whitespace on `'ID_REF '`, and that's causing the job to fail in
https://github.com/AlexsLemonade/refinebio/blob/dev/workers/data_refinery_workers/processors/illumina.py#L115-L121
There are more samples with the same error (~14k); however, the fix doesn't seem to be as easy for the others.
<details><summary>queries</summary>
```sql
select count(*)
from (
select samples.accession_code, (
select processor_jobs.failure_reason
from original_file_sample_associations, processorjob_originalfile_associations, processor_jobs
where samples.id=original_file_sample_associations.sample_id
and original_file_sample_associations.original_file_id=processorjob_originalfile_associations.original_file_id
and processorjob_originalfile_associations.processor_job_id=processor_jobs.id
order by processor_jobs.start_time desc
limit 1
) as job_failure_reason
from samples
where samples.is_processed='f'
) tt
where job_failure_reason like 'Could not find any ID column in headers [''ID_REF '',%';
```
```sql
select count(*)
from (
select samples.accession_code, (
select processor_jobs.failure_reason
from original_file_sample_associations, processorjob_originalfile_associations, processor_jobs
where samples.id=original_file_sample_associations.sample_id
and original_file_sample_associations.original_file_id=processorjob_originalfile_associations.original_file_id
and processorjob_originalfile_associations.processor_job_id=processor_jobs.id
order by processor_jobs.start_time desc
limit 1
) as job_failure_reason
from samples
where samples.is_processed='f'
) tt
where job_failure_reason like 'Could not find any ID column in headers%';
count
-------
14389
(1 row)
```
</details>
### Solution or next step
This looks like an easy fix; I'll ensure trailing whitespace is removed from column names.
| foreman/data_refinery_foreman/surveyor/geo.py
<|code_start|>import shutil
from re import sub
from typing import Dict, List
import dateutil.parser
import GEOparse
from data_refinery_common.enums import Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
Experiment,
ExperimentAnnotation,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
OriginalFile,
OriginalFileSampleAssociation,
Sample,
SampleAnnotation,
SurveyJobKeyValue,
)
from data_refinery_common.utils import (
FileUtils,
get_normalized_platform,
get_readable_affymetrix_names,
get_supported_microarray_platforms,
get_supported_rnaseq_platforms,
)
from data_refinery_foreman.surveyor import harmony, utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor
logger = get_and_configure_logger(__name__)
GEOparse.logger.set_verbosity("WARN")
UNKNOWN = "UNKNOWN"
class GeoSurveyor(ExternalSourceSurveyor):
"""Surveys NCBI GEO for data.
Implements the ExternalSourceSurveyor interface.
"""
    def source_type(self):
        """Identify this surveyor's data source (used to pick the downloader task)."""
        return Downloaders.GEO.value
def get_temp_path(self):
return "/tmp/" + str(self.survey_job.id) + "/"
    def set_platform_properties(
        self, sample_object: Sample, sample_metadata: Dict, gse: GEOparse.GSM
    ) -> Sample:
        """Sets platform-related properties on `sample_object`.

        Uses metadata from `gse` to populate platform_name,
        platform_accession_code, technology, and manufacturer on
        `sample_object`, falling back to UNKNOWN for anything that cannot
        be determined. Returns the mutated sample.
        """
        # Determine platform information
        external_accession = get_normalized_platform(gse.metadata.get("platform_id", [UNKNOWN])[0])
        if external_accession == UNKNOWN:
            sample_object.platform_accession_code = UNKNOWN
            sample_object.platform_name = UNKNOWN
            sample_object.manufacturer = UNKNOWN
            # If this sample is Affy, we potentially can extract the
            # platform information from the .CEL file. If it's not we
            # can't do anything. Therefore assume the technology is
            # microarray when we have no platform information.
            sample_object.technology = "MICROARRAY"
            return sample_object
        platform_accession_code = UNKNOWN
        # Fetch the GPL (platform) record to get a human-readable title.
        gpl = GEOparse.get_GEO(external_accession, destdir=self.get_temp_path(), silent=True)
        platform_title = gpl.metadata.get("title", [UNKNOWN])[0]
        # Check if this is a supported microarray platform.
        for platform in get_supported_microarray_platforms():
            if platform["external_accession"] == external_accession:
                platform_accession_code = platform["platform_accession"]
        if platform_accession_code != UNKNOWN:
            # It's a supported microarray platform.
            # We are using the brain array package as the platform accession code,
            # so, for instance, GPL3213 becomes 'chicken'.
            sample_object.platform_accession_code = platform_accession_code
            sample_object.technology = "MICROARRAY"
            try:
                # Related: https://github.com/AlexsLemonade/refinebio/issues/354
                # If it's Affy we can get a readable name:
                sample_object.platform_name = get_readable_affymetrix_names()[
                    platform_accession_code
                ]
                sample_object.manufacturer = "AFFYMETRIX"
                # Sometimes Affymetrix samples have weird channel
                # protocol metadata, so if we find that it's
                # Affymetrix return it now. Example: GSE113945
                return sample_object
            except KeyError:
                # Otherwise we'll use what we've got.
                sample_object.platform_name = platform_title
            # Determine manufacturer
            platform = sample_object.pretty_platform.upper()
            if "AGILENT" in platform:
                sample_object.manufacturer = "AGILENT"
            elif "ILLUMINA" in platform or "NEXTSEQ" in platform:
                sample_object.manufacturer = "ILLUMINA"
            elif "AFFYMETRIX" in platform:
                sample_object.manufacturer = "AFFYMETRIX"
            else:
                sample_object.manufacturer = UNKNOWN
            return sample_object
        # Check to see if this is a supported RNASeq technology:
        # GEO RNASeq platform titles often have organisms appended to
        # an otherwise recognizable platform. The list of supported
        # RNASeq platforms isn't long, so see if any of them are
        # contained within what GEO gave us.
        # Example: GSE69572 has a platform title of:
        # 'Illumina Genome Analyzer IIx (Glycine max)'
        # Which should really just be 'Illumina Genome Analyzer IIx'
        # because RNASeq platforms are organism agnostic. However,
        # the platforms 'Illumina Genome Analyzer' and 'Illumina
        # Genome Analyzer II' would also be matched, so make sure that
        # the longest platform names are tested first:
        sorted_platform_list = get_supported_rnaseq_platforms().copy()
        sorted_platform_list.sort(key=len, reverse=True)
        for platform in sorted_platform_list:
            if platform.upper() in platform_title.upper():
                sample_object.technology = "RNA-SEQ"
                sample_object.platform_name = platform
                # We just use RNASeq platform titles as accessions
                sample_object.platform_accession_code = platform
                if "ILLUMINA" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "ILLUMINA"
                elif "NEXTSEQ" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "NEXTSEQ"
                elif "ION TORRENT" in sample_object.platform_name.upper():
                    sample_object.manufacturer = "ION_TORRENT"
                else:
                    sample_object.manufacturer = UNKNOWN
                return sample_object
        # If we've made it this far, we don't know what this platform
        # is, therefore we can't know what its technology is. What we
        # do know is what GEO said was it's platform's accession and
        # title are, and that it's unsupported.
        sample_object.platform_name = platform_title
        sample_object.platform_accession_code = external_accession
        sample_object.technology = UNKNOWN
        sample_object.manufacturer = UNKNOWN
        return sample_object
def get_miniml_url(self, experiment_accession_code):
""" Build the URL for the MINiML files for this accession code.
ex:
'GSE68061' -> 'ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE68nnn/GSE68061/miniml/GSE68061_family.xml.tgz'
"""
geo = experiment_accession_code.upper()
range_subdir = sub(r"\d{1,3}$", "nnn", geo)
min_url_template = (
"ftp://ftp.ncbi.nlm.nih.gov/geo/" "series/{range_subdir}/{record}/miniml/{record_file}"
)
min_url = min_url_template.format(
range_subdir=range_subdir, record=geo, record_file="%s_family.xml.tgz" % geo
)
return min_url
@staticmethod
def get_sample_protocol_info(sample_metadata, sample_accession_code):
protocol_info = dict()
if "extract_protocol_ch1" in sample_metadata:
protocol_info["Extraction protocol"] = sample_metadata["extract_protocol_ch1"]
if "label_protocol_ch1" in sample_metadata:
protocol_info["Label protocol"] = sample_metadata["label_protocol_ch1"]
if "hyb_protocol" in sample_metadata:
protocol_info["Hybridization protocol"] = sample_metadata["hyb_protocol"]
if "scan_protocol" in sample_metadata:
protocol_info["Scan protocol"] = sample_metadata["scan_protocol"]
if "data_processing" in sample_metadata:
protocol_info["Data processing"] = sample_metadata["data_processing"]
protocol_info["Reference"] = (
"https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + sample_accession_code
)
return protocol_info
@staticmethod
def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):
    """Copy every harmonized metadata field onto `sample` as an attribute."""
    for field_name in harmonized_metadata:
        setattr(sample, field_name, harmonized_metadata[field_name])
@staticmethod
def _apply_metadata_to_experiment(experiment: Experiment, gse):
    """ Gets the metadata out of gse and applies it to the experiment.

    Populates source/title/description, submission and update timestamps,
    submitter institution, and PubMed publication details on `experiment`.
    Does not save the experiment; the caller is responsible for that.
    """
    experiment.source_url = (
        "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + experiment.accession_code
    )
    experiment.source_database = "GEO"
    experiment.title = gse.metadata.get("title", [""])[0]
    experiment.description = gse.metadata.get("summary", [""])[0]

    # Source doesn't provide time information, assume midnight.
    submission_date = gse.metadata["submission_date"][0] + " 00:00:00 UTC"
    experiment.source_first_published = dateutil.parser.parse(submission_date)
    last_updated_date = gse.metadata["last_update_date"][0] + " 00:00:00 UTC"
    experiment.source_last_updated = dateutil.parser.parse(last_updated_date)

    # GEO can repeat the same institution once per contact; dedupe them.
    unique_institutions = list(set(gse.metadata["contact_institute"]))
    experiment.submitter_institution = ", ".join(unique_institutions)
    experiment.pubmed_id = gse.metadata.get("pubmed_id", [""])[0]

    # Scrape publication title and authorship from Pubmed
    if experiment.pubmed_id:
        pubmed_metadata = utils.get_title_and_authors_for_pubmed_id(experiment.pubmed_id)
        experiment.publication_title = pubmed_metadata[0]
        experiment.publication_authors = pubmed_metadata[1]
def create_experiment_and_samples_from_api(
    self, experiment_accession_code
) -> (Experiment, List[Sample]):
    """ The main surveyor - find the Experiment and Samples from NCBI GEO.

    Fetches the GEO series, creates (or reuses) the Experiment, creates a
    Sample plus annotations for each GSM, and registers supplementary and
    MINiML OriginalFiles for the downloaders. Returns the Experiment and
    the list of newly created downloadable Samples (RNA-SEQ samples are
    surveyed but excluded from the returned list).

    Uses the GEOParse library, for which docs can be found here: https://geoparse.readthedocs.io/en/latest/usage.html#working-with-geo-objects
    """
    # Cleaning up is tracked here: https://github.com/guma44/GEOparse/issues/41
    gse = GEOparse.get_GEO(experiment_accession_code, destdir=self.get_temp_path(), silent=True)
    harmonizer = harmony.Harmonizer()

    # Create the experiment object
    try:
        experiment_object = Experiment.objects.get(accession_code=experiment_accession_code)
        logger.debug(
            "Experiment %s already exists, skipping object creation.",
            experiment_accession_code,
            survey_job=self.survey_job.id,
        )
    except Experiment.DoesNotExist:
        experiment_object = Experiment()
        experiment_object.accession_code = experiment_accession_code
        GeoSurveyor._apply_metadata_to_experiment(experiment_object, gse)
        experiment_object.save()

        # Keep the full raw GEO metadata alongside the experiment.
        experiment_annotation = ExperimentAnnotation()
        experiment_annotation.data = gse.metadata
        experiment_annotation.experiment = experiment_object
        experiment_annotation.is_ccdl = False
        experiment_annotation.save()

    # Okay, here's the situation!
    # Sometimes, samples have a direct single representation for themselves.
    # Othertimes, there is a single file with references to every sample in it.
    created_samples = []
    for sample_accession_code, sample in gse.gsms.items():
        try:
            sample_object = Sample.objects.get(accession_code=sample_accession_code)
            logger.debug(
                "Sample %s from experiment %s already exists, skipping object creation.",
                sample_accession_code,
                experiment_object.accession_code,
                survey_job=self.survey_job.id,
            )

            # Associate it with the experiment, but since it
            # already exists it already has original files
            # associated with it and it's already been downloaded,
            # so don't add it to created_samples.
            ExperimentSampleAssociation.objects.get_or_create(
                experiment=experiment_object, sample=sample_object
            )

            ExperimentOrganismAssociation.objects.get_or_create(
                experiment=experiment_object, organism=sample_object.organism
            )
        except Sample.DoesNotExist:
            organism = Organism.get_object_for_name(sample.metadata["organism_ch1"][0].upper())

            sample_object = Sample()
            sample_object.source_database = "GEO"
            sample_object.accession_code = sample_accession_code
            sample_object.organism = organism

            # If data processing step, it isn't raw.
            sample_object.has_raw = not sample.metadata.get("data_processing", None)

            ExperimentOrganismAssociation.objects.get_or_create(
                experiment=experiment_object, organism=organism
            )
            sample_object.title = sample.metadata["title"][0]

            self.set_platform_properties(sample_object, sample.metadata, gse)

            # Harmonize GEO-specific metadata into our common sample fields.
            preprocessed_sample = harmony.preprocess_geo_sample(sample)
            harmonized_sample = harmonizer.harmonize_sample(preprocessed_sample)
            GeoSurveyor._apply_harmonized_metadata_to_sample(sample_object, harmonized_sample)

            # Sample-level protocol_info
            sample_object.protocol_info = self.get_sample_protocol_info(
                sample.metadata, sample_accession_code
            )

            sample_object.save()
            logger.debug("Created Sample: " + str(sample_object))

            # NOTE(review): this mutates GEOparse's metadata dict in place
            # before storing it as the annotation payload.
            metadata = sample.metadata
            metadata["geo_columns"] = list(sample.columns.index)

            sample_annotation = SampleAnnotation()
            sample_annotation.sample = sample_object
            sample_annotation.data = metadata
            sample_annotation.is_ccdl = False
            sample_annotation.save()

            sample_supplements = sample.metadata.get("supplementary_file", [])
            for supplementary_file_url in sample_supplements:

                # Why do they give us this?
                if supplementary_file_url == "NONE":
                    break

                # We never want these!
                if "idat.gz" in supplementary_file_url.lower():
                    continue
                if ".chp" in supplementary_file_url.lower():
                    continue
                if "chp.gz" in supplementary_file_url.lower():
                    continue
                if "ndf.gz" in supplementary_file_url.lower():
                    continue
                if "pos.gz" in supplementary_file_url.lower():
                    continue
                if "pair.gz" in supplementary_file_url.lower():
                    continue
                if "gff.gz" in supplementary_file_url.lower():
                    continue

                # Sometimes, we are lied to about the data processing step.
                lower_file_url = supplementary_file_url.lower()
                if (
                    ".cel" in lower_file_url
                    or ("_non_normalized.txt" in lower_file_url)
                    or ("_non-normalized.txt" in lower_file_url)
                    or ("-non-normalized.txt" in lower_file_url)
                    or ("-non_normalized.txt" in lower_file_url)
                ):
                    sample_object.has_raw = True
                    sample_object.save()

                # filename and source_filename are the same for these
                filename = FileUtils.get_filename(supplementary_file_url)
                original_file = OriginalFile.objects.get_or_create(
                    source_url=supplementary_file_url,
                    filename=filename,
                    source_filename=filename,
                    has_raw=sample_object.has_raw,
                    is_archive=FileUtils.is_archive(filename),
                )[0]

                logger.debug("Created OriginalFile: " + str(original_file))

                OriginalFileSampleAssociation.objects.get_or_create(
                    original_file=original_file, sample=sample_object
                )

                if original_file.is_affy_data():
                    # Only Affymetrix Microarrays produce .CEL files
                    sample_object.technology = "MICROARRAY"
                    sample_object.manufacturer = "AFFYMETRIX"
                    sample_object.save()

            # It's okay to survey RNA-Seq samples from GEO, but we
            # don't actually want to download/process any RNA-Seq
            # data unless it comes from SRA.
            if sample_object.technology != "RNA-SEQ":
                created_samples.append(sample_object)

            # Now that we've determined the technology at the
            # sample level, we can set it at the experiment level,
            # just gotta make sure to only do it once. There can
            # be more than one technology, this should be changed
            # as part of:
            # https://github.com/AlexsLemonade/refinebio/issues/1099
            if not experiment_object.technology:
                experiment_object.technology = sample_object.technology
                experiment_object.save()

            ExperimentSampleAssociation.objects.get_or_create(
                experiment=experiment_object, sample=sample_object
            )

    # These supplementary files _may-or-may-not_ contain the type of raw data we can process.
    for experiment_supplement_url in gse.metadata.get("supplementary_file", []):

        # filename and source_filename are the same for these
        filename = experiment_supplement_url.split("/")[-1]
        # NOTE(review): has_raw here reuses sample_object from the loop
        # above — a series with zero GSMs would raise NameError; confirm
        # whether that can occur in practice.
        original_file = OriginalFile.objects.get_or_create(
            source_url=experiment_supplement_url,
            filename=filename,
            source_filename=filename,
            has_raw=sample_object.has_raw,
            is_archive=True,
        )[0]

        logger.debug("Created OriginalFile: " + str(original_file))

        lower_supplement_url = experiment_supplement_url.lower()
        if (
            ("_non_normalized.txt" in lower_supplement_url)
            or ("_non-normalized.txt" in lower_supplement_url)
            or ("-non-normalized.txt" in lower_supplement_url)
            or ("-non_normalized.txt" in lower_supplement_url)
        ):
            for sample_object in created_samples:
                sample_object.has_raw = True
                sample_object.save()

                OriginalFileSampleAssociation.objects.get_or_create(
                    sample=sample_object, original_file=original_file
                )

        # Delete this Original file if it isn't being used.
        if (
            OriginalFileSampleAssociation.objects.filter(original_file=original_file).count()
            == 0
        ):
            original_file.delete()

    # These are the Miniml/Soft/Matrix URLs that are always(?) provided.
    # GEO describes different types of data formatting as "families"
    family_url = self.get_miniml_url(experiment_accession_code)
    miniml_original_file = OriginalFile.objects.get_or_create(
        source_url=family_url,
        source_filename=family_url.split("/")[-1],
        has_raw=sample_object.has_raw,
        is_archive=True,
    )[0]
    for sample_object in created_samples:
        # We don't need a .txt if we have a .CEL
        if sample_object.has_raw:
            continue
        OriginalFileSampleAssociation.objects.get_or_create(
            sample=sample_object, original_file=miniml_original_file
        )

    # Delete this Original file if it isn't being used.
    if (
        OriginalFileSampleAssociation.objects.filter(original_file=miniml_original_file).count()
        == 0
    ):
        miniml_original_file.delete()

    # Trash the temp path
    try:
        shutil.rmtree(self.get_temp_path())
    except Exception:
        # There was a problem during surveying so this didn't get created.
        # It's not a big deal.
        pass

    return experiment_object, created_samples
def discover_experiment_and_samples(self) -> (Experiment, List[Sample]):
    """ Dispatches the surveyor, returns the results.

    Reads the accession code from the survey job's key/value store and
    delegates to create_experiment_and_samples_from_api.
    """
    experiment_accession_code = SurveyJobKeyValue.objects.get(
        survey_job_id=self.survey_job.id, key__exact="experiment_accession_code"
    ).value

    logger.debug(
        "Surveying experiment with accession code: %s.",
        experiment_accession_code,
        survey_job=self.survey_job.id,
    )

    experiment, samples = self.create_experiment_and_samples_from_api(experiment_accession_code)

    return experiment, samples
<|code_end|>
scripts/run_full_pipeline.py
<|code_start|>import argparse
import json
import subprocess
def parse_args():
    """Parse command-line arguments: a single positional `accession_code`."""
    description = """This script can be used to run the full pipeline.
This includes surveyor jobs queuing downloader jobs, which in turn should queue processor jobs.
This includes Microarray experiments, RNA-Seq experiments, and transcriptome indices.
This script must be run from the top level directory of the refinebio repository.!"""
    parser = argparse.ArgumentParser(description=description)

    accession_help_text = """Specify the accession_code of the experiment to survey.
If creating a Transcriptome Index an organism name like 'Homo
Sapiens' should be supplied if the organism is in the main
division of Ensembl. If the organism is in any other division it
should be specified like 'Arabidopsis thaliana, EnsemblPlants'."""
    parser.add_argument("accession_code", help=accession_help_text)

    return parser.parse_args()
def get_job_to_run():
    """Return the JSON description of the next job to be run, or None.

    Invokes the foreman's ``get_job_to_be_run`` management command and
    parses the JSON it prints as the last line of its output.
    """
    completed_command = subprocess.run(
        ["./foreman/run_surveyor.sh", "get_job_to_be_run"], stdout=subprocess.PIPE,
    )

    # Previously the output was parsed before checking the return code, so a
    # failed command with empty stdout raised IndexError instead of
    # returning None (the "no more jobs" signal).
    if completed_command.returncode != 0:
        return None

    # The JSON output is on the last line of the output, but it has a
    # trailing newline too, so the last non-empty line is at index -2.
    output_lines = completed_command.stdout.decode("utf-8").split("\n")
    if len(output_lines) < 2:
        return None

    return json.loads(output_lines[-2].strip())
def run_job(job):
    """Run one downloader or processor job via the workers' run_job.sh.

    `job` is a dict with keys "job_type", "job_name", and "job_id" as
    produced by the foreman's get_job_to_be_run command. Raises
    subprocess.CalledProcessError if the job script fails.
    """
    job_name = job["job_name"]
    job_id = job["job_id"]
    print(f"Running {job_name} Job with id {job_id}!")

    image_name = ""
    if job["job_type"] == "DownloaderJob":
        subcommand = "run_downloader_job"
        image_name = "downloaders"
    else:
        subcommand = "run_processor_job"
        if job_name == "AFFY_TO_PCL":
            image_name = "affymetrix"
        elif job_name == "SALMON":
            image_name = "salmon"
        elif job_name == "ILLUMINA_TO_PCL":
            # Fix: this previously read `image_name == "illumina"` (a
            # comparison, not an assignment), which left image_name empty
            # for Illumina processor jobs.
            image_name = "illumina"
        elif job_name in ["TRANSCRIPTOME_INDEX_LONG", "TRANSCRIPTOME_INDEX_SHORT"]:
            image_name = "transcriptome"
        elif job_name == "NO_OP":
            image_name = "no_op"

    subprocess.check_call(
        [
            "./workers/run_job.sh",
            "-i",
            image_name,
            subcommand,
            f"--job-name={job_name}",
            f"--job-id={job_id}",
        ]
    )
def survey_accession(accession_code):
    """Queue surveyor jobs for `accession_code` via the foreman's survey_all command."""
    subprocess.check_call(
        ["./foreman/run_surveyor.sh", "survey_all", "--accession", accession_code]
    )
def run_full_pipeline(accession_code):
    """Survey `accession_code`, then drain the job queue until it is empty.

    Each job may queue follow-on jobs (surveyor -> downloader -> processor),
    so we keep polling the foreman until it reports nothing left to run.
    """
    survey_accession(accession_code)

    while True:
        job_to_run = get_job_to_run()
        if not job_to_run:
            break
        run_job(job_to_run)
# Entry point: survey the given accession, then run queued jobs to completion.
if __name__ == "__main__":
    args = parse_args()
    run_full_pipeline(args.accession_code)
<|code_end|>
workers/data_refinery_workers/processors/illumina.py
<|code_start|>import csv
import multiprocessing
import os
import re
import subprocess
from typing import Dict
from django.utils import timezone
import numpy as np
import pandas as pd
from data_refinery_common.enums import PipelineEnum, ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Pipeline,
ProcessorJob,
ProcessorJobOriginalFileAssociation,
Sample,
SampleAnnotation,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
logger = get_and_configure_logger(__name__)
def _prepare_files(job_context: Dict) -> Dict:
    """Adds the keys "input_file_path" and "output_file_path" to
    job_context so everything is prepared for processing.

    Also writes a ".sanitized" copy of the original file into the job's
    work directory with non-data lines stripped so R can parse it.
    """
    # All files for the job are in the same directory.
    job_context["work_dir"] = (
        LOCAL_ROOT_DIR + "/" + "processor_job_" + str(job_context["job_id"]) + "/"
    )
    os.makedirs(job_context["work_dir"], exist_ok=True)

    original_file = job_context["original_files"][0]
    sanitized_filename = original_file.absolute_file_path.split("/")[-1] + ".sanitized"
    job_context["input_file_path"] = job_context["work_dir"] + sanitized_filename

    # The output PCL keeps the input's basename with the extension swapped.
    new_filename = original_file.absolute_file_path.split("/")[-1].replace(".txt", ".PCL")
    job_context["output_file_path"] = job_context["work_dir"] + new_filename

    # Sanitize this file so R doesn't choke.
    # Some have comments, some have non-comment-comments.
    with open(original_file.absolute_file_path, "r") as file_input:
        with open(job_context["input_file_path"], "w") as file_output:
            for line in file_input:
                # Keep only tab-delimited data rows: drop comments, blank
                # lines, GSM header rows, and markup-/quote-prefixed lines.
                if (
                    "#" not in line
                    and line.strip() != ""
                    and line != "\n"
                    and "\t" in line
                    and line[0:3].upper() != "GSM"
                    and line[0] != "'"
                    and line[0] != '"'
                    and line[0] != "!"
                    and line[0] != "/"
                    and line[0] != "<"
                    and line[0] != "\t"
                ):
                    file_output.write(line)

    return job_context
def _detect_columns(job_context: Dict) -> Dict:
    """Detect which columns match to which inputs.

    Related: https://github.com/AlexsLemonade/refinebio/issues/86#issuecomment-379308817

    We need to find:

        First column should be ID_REF or PROBE_ID and the type should be string.
        Detection Pval column
        Expression column (contains sample title and NOT 'BEAD')

    Header examples:

        ['ID_REF', 'LV-C&si-Control-1', 'Detection Pval',
        'LV-C&si-Control-2', 'Detection Pval', 'LV-C&si-Control-3', 'Detection
        Pval', 'LV-C&si-EZH2-1', 'Detection Pval', 'LV-C&si-EZH2-2', 'Detection
        Pval', 'LV-C&si-EZH2-3', 'Detection Pval', 'LV-EZH2&si-EZH2-1',
        'Detection Pval', 'LV-EZH2&si-EZH2-2', 'Detection Pval', 'LV-EZH2&si-
        EZH2-3', 'Detection Pval', 'LV-T350A&si-EZH2-1', 'Detection Pval', 'LV-
        T350A&si-EZH2-2', 'Detection Pval', 'LV-T350A&si-EZH2-3', 'Detection
        Pval']

    Adds the following keys to job_context:

        columnIds: the identifiers of columns which contain expression data
        probeId: which is the value of the column containing the probe identifiers.
        detectionPval: a string which identifies Pvalue columns
    """
    try:
        input_file = job_context["input_file_path"]
        headers = None
        # Only the first row (the header) is needed from the sanitized TSV.
        with open(input_file, "r") as tsv_in:
            tsv_in = csv.reader(tsv_in, delimiter="\t")
            for row in tsv_in:
                headers = row
                break

        # Ex GSE45331_non-normalized.txt
        predicted_header = 0
        if headers[0].upper() in ["TARGETID", "TARGET_ID"]:
            predicted_header = 1

        # First the probe ID column
        if headers[predicted_header].upper() not in [
            "ID_REF",
            "PROBE_ID",
            "IDREF",
            "PROBEID",
            "REF_ID",
            "REFID",
            "IDPROBE",
            "ID_PROBE",
        ]:
            job_context["job"].failure_reason = (
                "Could not find any ID column in headers "
                + str(headers)
                + " for file "
                + job_context["input_file_path"]
            )
            job_context["success"] = False
            return job_context
        else:
            job_context["probeId"] = headers[predicted_header]

        # Then check to make sure a detection pvalue exists, which is always(?) some form of
        # 'Detection Pval'
        for header in headers:
            # check if header contains something like "detection pval"
            pvalue_header = re.search(r"(detection)(\W?)(pval\w*)", header, re.IGNORECASE)
            if pvalue_header:
                break
        else:
            # for-else: no header matched the pvalue pattern.
            job_context["job"].failure_reason = "Could not detect PValue column!"
            job_context["success"] = False
            job_context["job"].no_retry = True
            return job_context

        # Then, finally, create an absolutely bonkers regular expression
        # which will explicitly hit on any sample which contains a sample
        # ID _and_ ignores the magical word 'BEAD', etc. Great!
        column_ids = set()
        for sample in job_context["samples"]:
            for offset, header in enumerate(headers, start=1):
                if sample.title == header:
                    column_ids.add(offset)
                    continue

                # Sometimes the title might actually be in the description field.
                # To find this, look in all the related SampleAnnotations.
                # Since there are multiple annotations, we need to break early before continuing.
                # Related: https://github.com/AlexsLemonade/refinebio/issues/499
                continue_me = False
                for annotation in sample.sampleannotation_set.filter(is_ccdl=False):
                    try:
                        if annotation.data.get("description", "")[0] == header:
                            column_ids.add(offset)
                            continue_me = True
                            break
                    except Exception:
                        pass

                if continue_me:
                    # Treat the header as the real title, as we will need it later.
                    sample.title = header
                    sample.save()
                    continue

                if header.upper().replace(" ", "_") == "RAW_VALUE":
                    column_ids.add(offset)
                    continue

                if (
                    sample.title.upper() in header.upper()
                    and "BEAD" not in header.upper()
                    and "NARRAYS" not in header.upper()
                    and "ARRAY_STDEV" not in header.upper()
                    and "PVAL" not in header.upper().replace(" ", "").replace("_", "")
                ):
                    column_ids.add(offset)
                    continue

        # AVG_Signal columns are always expression data regardless of title.
        for offset, header in enumerate(headers, start=1):
            if "AVG_Signal" in header:
                column_ids.add(offset)
                continue

        job_context["columnIds"] = ",".join(map(lambda id: str(id), column_ids))
    except Exception as e:
        job_context["job"].failure_reason = str(e)
        job_context["success"] = False
        logger.exception(
            "Failed to extract columns in " + job_context["input_file_path"], exception=str(e)
        )
        job_context["job"].no_retry = True
        return job_context

    return job_context
def _detect_platform(job_context: Dict) -> Dict:
    """
    Determine the platform/database to process this sample with.

    They often provide something like "V2" or "V 2", but we don't trust
    them so we detect it ourselves by scoring the probe IDs against each
    candidate Bioconductor annotation package via detect_database.R.

    On a confident match, sets job_context["platform"]; otherwise queues a
    NO_OP processor job for the file and sets job_context["abort"].

    Related: https://github.com/AlexsLemonade/refinebio/issues/232
    """
    # Candidate Bioconductor annotation packages per organism.
    all_databases = {
        "HOMO_SAPIENS": [
            "illuminaHumanv1",
            "illuminaHumanv2",
            "illuminaHumanv3",
            "illuminaHumanv4",
        ],
        "MUS_MUSCULUS": ["illuminaMousev1", "illuminaMousev1p1", "illuminaMousev2",],
        "RATTUS_NORVEGICUS": ["illuminaRatv1"],
    }
    sample0 = job_context["samples"][0]
    databases = all_databases[sample0.organism.name]

    # Loop over all of the possible platforms and find the one with the best match.
    highest = 0.0
    high_mapped_percent = 0.0
    high_db = None
    for platform in databases:
        try:
            result = subprocess.check_output(
                [
                    "/usr/bin/Rscript",
                    "--vanilla",
                    "/home/user/data_refinery_workers/processors/detect_database.R",
                    "--platform",
                    platform,
                    "--inputFile",
                    job_context["input_file_path"],
                    "--column",
                    job_context["probeId"],
                ]
            )

            # The R script prints the detection score on line 1 and the
            # mapped percentage on line 2.
            results = result.decode().split("\n")
            cleaned_result = float(results[0].strip())

            if cleaned_result > highest:
                highest = cleaned_result
                high_db = platform
                high_mapped_percent = float(results[1].strip())

        except Exception as e:
            logger.exception(e, processor_job_id=job_context["job"].id)
            continue

    # Record our sample detection outputs for every sample.
    for sample in job_context["samples"]:
        sa = SampleAnnotation()
        sa.sample = sample
        sa.is_ccdl = True
        sa.data = {
            "detected_platform": high_db,
            "detection_percentage": highest,
            "mapped_percentage": high_mapped_percent,
        }
        sa.save()

    # If the match is over 75%, record this and process it on that platform.
    if high_mapped_percent > 75.0:
        job_context["platform"] = high_db
    # The match percentage is too low - send this to the no-opper instead.
    else:
        logger.info("Match percentage too low, NO_OP'ing and aborting.", job=job_context["job_id"])

        processor_job = ProcessorJob()
        processor_job.downloader_job = job_context["job"].downloader_job
        processor_job.pipeline_applied = "NO_OP"
        processor_job.ram_amount = job_context["job"].ram_amount
        processor_job.save()

        assoc = ProcessorJobOriginalFileAssociation()
        assoc.original_file = job_context["original_files"][0]
        assoc.processor_job = processor_job
        assoc.save()

        try:
            send_job(ProcessorPipeline.NO_OP, processor_job)
        except Exception as e:
            # Batch dispatch error, likely during local test.
            logger.error(e, job=processor_job)

        job_context["abort"] = True

    return job_context
def _run_illumina(job_context: Dict) -> Dict:
    """Processes an input TXT file to an output PCL file using a custom R script.

    Expects a job_context which has been pre-populated with inputs, outputs
    and the column identifiers which the R script needs for processing.

    Records the command and start/end timestamps in job_context; on failure
    sets job_context["success"] = False with a failure_reason on the job.
    """
    try:
        job_context["time_start"] = timezone.now()

        formatted_command = [
            "/usr/bin/Rscript",
            "--vanilla",
            "/home/user/data_refinery_workers/processors/illumina.R",
            "--probeId",
            job_context["probeId"],
            "--expression",
            job_context["columnIds"],
            "--platform",
            job_context["platform"],
            "--inputFile",
            job_context["input_file_path"],
            "--outputFile",
            job_context["output_file_path"],
            "--cores",
            str(multiprocessing.cpu_count()),
        ]

        subprocess.check_output(formatted_command)

        # Stored so the ComputationalResult can record exactly what was run.
        job_context["formatted_command"] = " ".join(formatted_command)

        job_context["time_end"] = timezone.now()

    except Exception as e:
        error_template = (
            "Encountered error in R code while running illumina.R"
            " pipeline during processing of {0}: {1}"
        )
        error_message = error_template.format(job_context["input_file_path"], str(e))

        logger.error(error_message, processor_job=job_context["job_id"])
        job_context["job"].failure_reason = error_message
        job_context["success"] = False

    return job_context
def _get_sample_for_column(column: str, job_context: Dict) -> Sample:
    """Find the Sample that an expression column header refers to.

    Tries, in order: an exact title match, the '<TITLE>(.AVG)?_Signal'
    naming convention, and finally a lookup through the sample's recorded
    GEO columns. Returns None when no sample matches.
    """
    # First of all check if the title is the column name
    try:
        return job_context["samples"].get(title=column)
    except Sample.DoesNotExist:
        pass

    # If the column name is not the title, maybe they used the convention
    # <SAMPLE_TITLE>(.AVG)?_Signal
    title_match = re.match(r"(?P<title>.*?)(\.AVG)?_Signal", column)
    if title_match is not None:
        try:
            return job_context["samples"].get(title=title_match.group("title"))
        except Sample.DoesNotExist:
            pass

    # Or maybe they also have a named detection pvalue column using the same
    # naming scheme
    name_match = re.match(r"(?P<name>.*)\.AVG_Signal", column)
    if name_match is not None:
        try:
            return job_context["samples"].get(
                sampleannotation__data__geo_columns__contains="{}.Detection Pval".format(
                    name_match.group("name")
                )
            )
        except Sample.DoesNotExist:
            pass

    return None
def _create_result_objects(job_context: Dict) -> Dict:
    """Create the ComputationalResult objects after a Scan run is complete.

    Splits the multi-sample PCL produced by illumina.R into one smashable
    per-sample TSV, creating ComputedFile records and sample associations
    for each.
    """
    result = ComputationalResult()
    result.commands.append(job_context["formatted_command"])
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context["time_start"]
    result.time_end = job_context["time_end"]
    try:
        processor_key = "ILLUMINA_SCAN"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)

    result.save()
    job_context["pipeline"].steps.append(result.id)

    # Split the result into smashable subfiles
    big_tsv = job_context["output_file_path"]
    data = pd.read_csv(big_tsv, sep="\t", header=0, index_col=0)
    individual_files = []
    # One single-column frame per sample column.
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        # Strip characters that would make an unsafe filename.
        filename = (
            frame.columns.values[0].replace("&", "").replace("*", "").replace(";", "") + ".tsv"
        )
        frame_path = job_context["work_dir"] + filename
        frame.to_csv(frame_path, sep="\t", encoding="utf-8")

        # This needs to be the same as the ones in the job context!
        sample = _get_sample_for_column(frame.columns.values[0], job_context)
        if sample is None:
            job_context["job"].failure_reason = (
                "Could not find sample for column "
                + frame.columns.values[0]
                + " while splitting Illumina file "
                + big_tsv
            )
            job_context["success"] = False
            job_context["job"].no_retry = True
            return job_context

        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = frame_path.split("/")[-1]
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context["computed_files"].append(computed_file)

        SampleResultAssociation.objects.get_or_create(sample=sample, result=result)

        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample, computed_file=computed_file
        )

        individual_files.append(computed_file)

    logger.debug("Created %s", result)
    job_context["success"] = True
    job_context["individual_files"] = individual_files
    job_context["result"] = result

    return job_context
def illumina_to_pcl(job_id: int) -> None:
    """Run the full Illumina TXT-to-PCL pipeline for the given processor job."""
    pipeline = Pipeline(name=PipelineEnum.ILLUMINA.value)
    return utils.run_pipeline(
        {"job_id": job_id, "pipeline": pipeline},
        [
            utils.start_job,
            _prepare_files,
            _detect_columns,
            _detect_platform,
            _run_illumina,
            _create_result_objects,
            utils.end_job,
        ],
    )
<|code_end|>
| foreman/data_refinery_foreman/surveyor/geo.py
<|code_start|>import shutil
from re import sub
from typing import Dict, List
import dateutil.parser
import GEOparse
from data_refinery_common.enums import Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
Experiment,
ExperimentAnnotation,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
OriginalFile,
OriginalFileSampleAssociation,
Sample,
SampleAnnotation,
SurveyJobKeyValue,
)
from data_refinery_common.utils import (
FileUtils,
get_normalized_platform,
get_readable_affymetrix_names,
get_supported_microarray_platforms,
get_supported_rnaseq_platforms,
)
from data_refinery_foreman.surveyor import harmony, utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor
logger = get_and_configure_logger(__name__)
GEOparse.logger.set_verbosity("WARN")
UNKNOWN = "UNKNOWN"
class GeoSurveyor(ExternalSourceSurveyor):
"""Surveys NCBI GEO for data.
Implements the ExternalSourceSurveyor interface.
"""
def source_type(self):
    """Return the downloader type this surveyor produces jobs for (GEO)."""
    return Downloaders.GEO.value
def get_temp_path(self):
    """Return this survey job's scratch directory, e.g. '/tmp/<job_id>/'."""
    return "/tmp/{}/".format(self.survey_job.id)
def set_platform_properties(
    self, sample_object: Sample, sample_metadata: Dict, gse: GEOparse.GSM
) -> Sample:
    """Sets platform-related properties on `sample_object`.

    Uses metadata from `gse` to populate platform_name,
    platform_accession_code, and technology on `sample_object`.

    Returns `sample_object` (mutated in place, not saved).
    """

    # Determine platform information
    external_accession = get_normalized_platform(gse.metadata.get("platform_id", [UNKNOWN])[0])
    if external_accession == UNKNOWN:
        sample_object.platform_accession_code = UNKNOWN
        sample_object.platform_name = UNKNOWN
        sample_object.manufacturer = UNKNOWN
        # If this sample is Affy, we potentially can extract the
        # platform information from the .CEL file. If it's not we
        # can't do anything. Therefore assume the technology is
        # microarray when we have no platform information.
        sample_object.technology = "MICROARRAY"
        return sample_object

    platform_accession_code = UNKNOWN

    # Fetch the GPL platform record to get its human-readable title.
    gpl = GEOparse.get_GEO(external_accession, destdir=self.get_temp_path(), silent=True)
    platform_title = gpl.metadata.get("title", [UNKNOWN])[0]

    # Check if this is a supported microarray platform.
    for platform in get_supported_microarray_platforms():
        if platform["external_accession"] == external_accession:
            platform_accession_code = platform["platform_accession"]

    if platform_accession_code != UNKNOWN:
        # It's a supported microarray platform.

        # We are using the brain array package as the platform accession code,
        # so, for instance, GPL3213 becomes 'chicken'.
        sample_object.platform_accession_code = platform_accession_code
        sample_object.technology = "MICROARRAY"
        try:
            # Related: https://github.com/AlexsLemonade/refinebio/issues/354
            # If it's Affy we can get a readable name:
            sample_object.platform_name = get_readable_affymetrix_names()[
                platform_accession_code
            ]
            sample_object.manufacturer = "AFFYMETRIX"

            # Sometimes Affymetrix samples have weird channel
            # protocol metadata, so if we find that it's
            # Affymetrix return it now. Example: GSE113945
            return sample_object
        except KeyError:
            # Otherwise we'll use what we've got.
            sample_object.platform_name = platform_title

        # Determine manufacturer
        platform = sample_object.pretty_platform.upper()
        if "AGILENT" in platform:
            sample_object.manufacturer = "AGILENT"
        elif "ILLUMINA" in platform or "NEXTSEQ" in platform:
            sample_object.manufacturer = "ILLUMINA"
        elif "AFFYMETRIX" in platform:
            sample_object.manufacturer = "AFFYMETRIX"
        else:
            sample_object.manufacturer = UNKNOWN

        return sample_object

    # Check to see if this is a supported RNASeq technology:

    # GEO RNASeq platform titles often have organisms appended to
    # an otherwise recognizable platform. The list of supported
    # RNASeq platforms isn't long, so see if any of them are
    # contained within what GEO gave us.
    # Example: GSE69572 has a platform title of:
    # 'Illumina Genome Analyzer IIx (Glycine max)'
    # Which should really just be 'Illumina Genome Analyzer IIx'
    # because RNASeq platforms are organism agnostic. However,
    # the platforms 'Illumina Genome Analyzer' and 'Illumina
    # Genome Analyzer II' would also be matched, so make sure that
    # the longest platform names are tested first:
    sorted_platform_list = get_supported_rnaseq_platforms().copy()
    sorted_platform_list.sort(key=len, reverse=True)

    for platform in sorted_platform_list:
        if platform.upper() in platform_title.upper():
            sample_object.technology = "RNA-SEQ"
            sample_object.platform_name = platform
            # We just use RNASeq platform titles as accessions
            sample_object.platform_accession_code = platform

            if "ILLUMINA" in sample_object.platform_name.upper():
                sample_object.manufacturer = "ILLUMINA"
            elif "NEXTSEQ" in sample_object.platform_name.upper():
                sample_object.manufacturer = "NEXTSEQ"
            elif "ION TORRENT" in sample_object.platform_name.upper():
                sample_object.manufacturer = "ION_TORRENT"
            else:
                sample_object.manufacturer = UNKNOWN

            return sample_object

    # If we've made it this far, we don't know what this platform
    # is, therefore we can't know what its technology is. What we
    # do know is what GEO said was it's platform's accession and
    # title are, and that it's unsupported.
    sample_object.platform_name = platform_title
    sample_object.platform_accession_code = external_accession
    sample_object.technology = UNKNOWN
    sample_object.manufacturer = UNKNOWN

    return sample_object
def get_miniml_url(self, experiment_accession_code):
    """Construct the FTP URL of the MINiML family archive for a GEO series.

    Example:
        'GSE68061' ->
        'ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE68nnn/GSE68061/miniml/GSE68061_family.xml.tgz'
    """
    accession = experiment_accession_code.upper()
    # GEO buckets series into directories whose trailing 1-3 digits are "nnn".
    bucket = sub(r"\d{1,3}$", "nnn", accession)
    return (
        "ftp://ftp.ncbi.nlm.nih.gov/geo/series/"
        f"{bucket}/{accession}/miniml/{accession}_family.xml.tgz"
    )
@staticmethod
def get_sample_protocol_info(sample_metadata, sample_accession_code):
    """Collect the protocol fields GEO provides for one sample.

    Returns a dict mapping human-readable protocol labels to the raw GEO
    metadata values, plus a "Reference" link back to the sample's GEO page.
    Keys whose source field is absent from `sample_metadata` are omitted.
    """
    # Output label -> GEO metadata key that supplies it, in display order.
    field_map = [
        ("Extraction protocol", "extract_protocol_ch1"),
        ("Label protocol", "label_protocol_ch1"),
        ("Hybridization protocol", "hyb_protocol"),
        ("Scan protocol", "scan_protocol"),
        ("Data processing", "data_processing"),
    ]
    protocol_info = {
        label: sample_metadata[key] for label, key in field_map if key in sample_metadata
    }
    protocol_info["Reference"] = (
        "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + sample_accession_code
    )
    return protocol_info
@staticmethod
def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):
    """Copy every harmonized metadata field onto `sample` as an attribute."""
    for field_name, field_value in harmonized_metadata.items():
        setattr(sample, field_name, field_value)
@staticmethod
def _apply_metadata_to_experiment(experiment: Experiment, gse):
    """Populate `experiment` fields from the GEOparse series object `gse`."""
    experiment.source_url = (
        "https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + experiment.accession_code
    )
    experiment.source_database = "GEO"
    experiment.title = gse.metadata.get("title", [""])[0]
    experiment.description = gse.metadata.get("summary", [""])[0]

    # Source doesn't provide time information, assume midnight.
    experiment.source_first_published = dateutil.parser.parse(
        gse.metadata["submission_date"][0] + " 00:00:00 UTC"
    )
    experiment.source_last_updated = dateutil.parser.parse(
        gse.metadata["last_update_date"][0] + " 00:00:00 UTC"
    )

    # Institutions can repeat; keep each one once.
    experiment.submitter_institution = ", ".join(set(gse.metadata["contact_institute"]))
    experiment.pubmed_id = gse.metadata.get("pubmed_id", [""])[0]

    # Scrape publication title and authorship from Pubmed
    if experiment.pubmed_id:
        publication_title, publication_authors = utils.get_title_and_authors_for_pubmed_id(
            experiment.pubmed_id
        )
        experiment.publication_title = publication_title
        experiment.publication_authors = publication_authors
def create_experiment_and_samples_from_api(
    self, experiment_accession_code
) -> (Experiment, List[Sample]):
    """The main surveyor - find the Experiment and Samples from NCBI GEO.

    Uses the GEOParse library, for which docs can be found here: https://geoparse.readthedocs.io/en/latest/usage.html#working-with-geo-objects

    Returns a tuple of the Experiment (created or pre-existing) and the
    list of Samples newly created by this call. RNA-Seq samples are not
    included in that list (see comment below).
    """
    # Cleaning up is tracked here: https://github.com/guma44/GEOparse/issues/41
    gse = GEOparse.get_GEO(experiment_accession_code, destdir=self.get_temp_path(), silent=True)
    harmonizer = harmony.Harmonizer()

    # Create the experiment object
    try:
        experiment_object = Experiment.objects.get(accession_code=experiment_accession_code)
        logger.debug(
            "Experiment %s already exists, skipping object creation.",
            experiment_accession_code,
            survey_job=self.survey_job.id,
        )
    except Experiment.DoesNotExist:
        experiment_object = Experiment()
        experiment_object.accession_code = experiment_accession_code
        GeoSurveyor._apply_metadata_to_experiment(experiment_object, gse)
        experiment_object.save()

        # Keep the complete raw GEO metadata as a non-CCDL annotation.
        experiment_annotation = ExperimentAnnotation()
        experiment_annotation.data = gse.metadata
        experiment_annotation.experiment = experiment_object
        experiment_annotation.is_ccdl = False
        experiment_annotation.save()

    # Okay, here's the situation!
    # Sometimes, samples have a direct single representation for themselves.
    # Other times, there is a single file with references to every sample in it.
    created_samples = []
    for sample_accession_code, sample in gse.gsms.items():
        try:
            sample_object = Sample.objects.get(accession_code=sample_accession_code)
            logger.debug(
                "Sample %s from experiment %s already exists, skipping object creation.",
                sample_accession_code,
                experiment_object.accession_code,
                survey_job=self.survey_job.id,
            )

            # Associate it with the experiment, but since it
            # already exists it already has original files
            # associated with it and it's already been downloaded,
            # so don't add it to created_samples.
            ExperimentSampleAssociation.objects.get_or_create(
                experiment=experiment_object, sample=sample_object
            )

            ExperimentOrganismAssociation.objects.get_or_create(
                experiment=experiment_object, organism=sample_object.organism
            )
        except Sample.DoesNotExist:
            organism = Organism.get_object_for_name(sample.metadata["organism_ch1"][0].upper())

            sample_object = Sample()
            sample_object.source_database = "GEO"
            sample_object.accession_code = sample_accession_code
            sample_object.organism = organism

            # If data processing step, it isn't raw.
            sample_object.has_raw = not sample.metadata.get("data_processing", None)

            ExperimentOrganismAssociation.objects.get_or_create(
                experiment=experiment_object, organism=organism
            )
            sample_object.title = sample.metadata["title"][0]

            self.set_platform_properties(sample_object, sample.metadata, gse)

            # Harmonize (normalize field names/values of) the GEO metadata
            # before copying it onto the sample.
            preprocessed_sample = harmony.preprocess_geo_sample(sample)
            harmonized_sample = harmonizer.harmonize_sample(preprocessed_sample)
            GeoSurveyor._apply_harmonized_metadata_to_sample(sample_object, harmonized_sample)

            # Sample-level protocol_info
            sample_object.protocol_info = self.get_sample_protocol_info(
                sample.metadata, sample_accession_code
            )

            sample_object.save()
            logger.debug("Created Sample: " + str(sample_object))

            # Record the raw per-sample GEO metadata, including the column
            # names GEO reported for this sample's data table.
            metadata = sample.metadata
            metadata["geo_columns"] = list(sample.columns.index)

            sample_annotation = SampleAnnotation()
            sample_annotation.sample = sample_object
            sample_annotation.data = metadata
            sample_annotation.is_ccdl = False
            sample_annotation.save()

            sample_supplements = sample.metadata.get("supplementary_file", [])
            for supplementary_file_url in sample_supplements:

                # Why do they give us this?
                if supplementary_file_url == "NONE":
                    break

                lower_file_url = supplementary_file_url.lower()

                # We never want these!
                if (
                    "idat.gz" in lower_file_url
                    or ".chp" in lower_file_url
                    or "chp.gz" in lower_file_url
                    or "ndf.gz" in lower_file_url
                    or "pos.gz" in lower_file_url
                    or "pair.gz" in lower_file_url
                    or "gff.gz" in lower_file_url
                    or "sdf.gz" in lower_file_url
                    or "tif.gz" in lower_file_url
                    or "locs.gz" in lower_file_url
                    or "grn.xml.gz" in lower_file_url
                    or "red.xml.gz" in lower_file_url
                    # As far as I have seen, real Illumina files never come in
                    # .csv, but plenty of irrelevant data does.
                    or (sample_object.manufacturer == "ILLUMINA" and "csv.gz" in lower_file_url)
                ):
                    continue

                # Sometimes, we are lied to about the data processing step.
                if (
                    ".cel" in lower_file_url
                    or ("_non_normalized.txt" in lower_file_url)
                    or ("_non-normalized.txt" in lower_file_url)
                    or ("-non-normalized.txt" in lower_file_url)
                    or ("-non_normalized.txt" in lower_file_url)
                ):
                    sample_object.has_raw = True
                    sample_object.save()

                # filename and source_filename are the same for these
                filename = FileUtils.get_filename(supplementary_file_url)
                original_file = OriginalFile.objects.get_or_create(
                    source_url=supplementary_file_url,
                    filename=filename,
                    source_filename=filename,
                    has_raw=sample_object.has_raw,
                    is_archive=FileUtils.is_archive(filename),
                )[0]

                logger.debug("Created OriginalFile: " + str(original_file))

                OriginalFileSampleAssociation.objects.get_or_create(
                    original_file=original_file, sample=sample_object
                )

                if original_file.is_affy_data():
                    # Only Affymetrix Microarrays produce .CEL files
                    sample_object.technology = "MICROARRAY"
                    sample_object.manufacturer = "AFFYMETRIX"
                    sample_object.save()

            # It's okay to survey RNA-Seq samples from GEO, but we
            # don't actually want to download/process any RNA-Seq
            # data unless it comes from SRA.
            if sample_object.technology != "RNA-SEQ":
                created_samples.append(sample_object)

            # Now that we've determined the technology at the
            # sample level, we can set it at the experiment level,
            # just gotta make sure to only do it once. There can
            # be more than one technology, this should be changed
            # as part of:
            # https://github.com/AlexsLemonade/refinebio/issues/1099
            if not experiment_object.technology:
                experiment_object.technology = sample_object.technology
                experiment_object.save()

            ExperimentSampleAssociation.objects.get_or_create(
                experiment=experiment_object, sample=sample_object
            )

    # These supplementary files _may-or-may-not_ contain the type of raw data we can process.
    for experiment_supplement_url in gse.metadata.get("supplementary_file", []):

        # filename and source_filename are the same for these
        filename = experiment_supplement_url.split("/")[-1]
        # NOTE(review): `sample_object.has_raw` here reuses whichever sample
        # was last bound by the loop above — confirm this is intentional.
        original_file = OriginalFile.objects.get_or_create(
            source_url=experiment_supplement_url,
            filename=filename,
            source_filename=filename,
            has_raw=sample_object.has_raw,
            is_archive=True,
        )[0]

        logger.debug("Created OriginalFile: " + str(original_file))

        lower_supplement_url = experiment_supplement_url.lower()
        if (
            ("_non_normalized.txt" in lower_supplement_url)
            or ("_non-normalized.txt" in lower_supplement_url)
            or ("-non-normalized.txt" in lower_supplement_url)
            or ("-non_normalized.txt" in lower_supplement_url)
        ):
            for sample_object in created_samples:
                sample_object.has_raw = True
                sample_object.save()

                OriginalFileSampleAssociation.objects.get_or_create(
                    sample=sample_object, original_file=original_file
                )

        # Delete this Original file if it isn't being used.
        if (
            OriginalFileSampleAssociation.objects.filter(original_file=original_file).count()
            == 0
        ):
            original_file.delete()

    # These are the Miniml/Soft/Matrix URLs that are always(?) provided.
    # GEO describes different types of data formatting as "families"
    family_url = self.get_miniml_url(experiment_accession_code)
    miniml_original_file = OriginalFile.objects.get_or_create(
        source_url=family_url,
        source_filename=family_url.split("/")[-1],
        has_raw=sample_object.has_raw,
        is_archive=True,
    )[0]
    for sample_object in created_samples:
        # We don't need a .txt if we have a .CEL
        if sample_object.has_raw:
            continue
        OriginalFileSampleAssociation.objects.get_or_create(
            sample=sample_object, original_file=miniml_original_file
        )

    # Delete this Original file if it isn't being used.
    if (
        OriginalFileSampleAssociation.objects.filter(original_file=miniml_original_file).count()
        == 0
    ):
        miniml_original_file.delete()

    # Trash the temp path
    try:
        shutil.rmtree(self.get_temp_path())
    except Exception:
        # There was a problem during surveying so this didn't get created.
        # It's not a big deal.
        pass

    return experiment_object, created_samples
def discover_experiment_and_samples(self) -> (Experiment, List[Sample]):
    """Dispatches the surveyor for this job's accession, returning the results."""
    accession = SurveyJobKeyValue.objects.get(
        survey_job_id=self.survey_job.id, key__exact="experiment_accession_code"
    ).value

    logger.debug(
        "Surveying experiment with accession code: %s.",
        accession,
        survey_job=self.survey_job.id,
    )

    return self.create_experiment_and_samples_from_api(accession)
<|code_end|>
scripts/run_full_pipeline.py
<|code_start|>#!/usr/bin/env python3
import argparse
import json
import subprocess
def parse_args():
    """Parse command line arguments.

    The single (required, positional) argument is the accession code of
    the experiment, or the organism name for a transcriptome index.
    """
    # NOTE: the stray "." before "!" in the original description text was a
    # typo in a user-visible message; fixed here.
    description = """This script can be used to run the full pipeline.
This includes surveyor jobs queuing downloader jobs, which in turn should queue processor jobs.
This includes Microarray experiments, RNA-Seq experiments, and transcriptome indices.
This script must be run from the top level directory of the refinebio repository!"""
    parser = argparse.ArgumentParser(description=description)

    accession_help_text = """Specify the accession_code of the experiment to survey.
If creating a Transcriptome Index an organism name like 'Homo
Sapiens' should be supplied if the organism is in the main
division of Ensembl. If the organism is in any other division it
should be specified like 'Arabidopsis thaliana, EnsemblPlants'."""
    parser.add_argument("accession_code", help=accession_help_text)

    return parser.parse_args()
def get_job_to_run():
    """Ask the foreman for the next queued job and return it as a dict.

    Returns None when the foreman command exits non-zero.
    """
    foreman_result = subprocess.run(
        ["./foreman/run_surveyor.sh", "get_job_to_be_run"], stdout=subprocess.PIPE,
    )

    # The JSON output is on the last line of the output, but it has a
    # newline as well (hence index -2 after splitting).
    json_line = foreman_result.stdout.decode("utf-8").split("\n")[-2].strip()

    if foreman_result.returncode != 0:
        return None
    return json.loads(json_line)
def run_job(job):
    """Run a downloader or processor job locally via ./workers/run_job.sh."""
    job_name = job["job_name"]
    job_id = job["job_id"]
    print(f"Running {job_name} Job with id {job_id}!")

    if job["job_type"] == "DownloaderJob":
        subcommand = "run_downloader_job"
        image_name = "downloaders"
    else:
        subcommand = "run_processor_job"
        # Map each processor pipeline to the docker image that provides it;
        # unknown pipelines fall back to an empty image name, matching the
        # previous if/elif behavior.
        pipeline_images = {
            "AFFY_TO_PCL": "affymetrix",
            "SALMON": "salmon",
            "ILLUMINA_TO_PCL": "illumina",
            "TRANSCRIPTOME_INDEX_LONG": "transcriptome",
            "TRANSCRIPTOME_INDEX_SHORT": "transcriptome",
            "NO_OP": "no_op",
        }
        image_name = pipeline_images.get(job_name, "")

    subprocess.check_call(
        [
            "./workers/run_job.sh",
            "-i",
            image_name,
            subcommand,
            f"--job-name={job_name}",
            f"--job-id={job_id}",
        ]
    )
def survey_accession(accession_code):
    """Queue surveyor jobs for the given accession via the foreman."""
    surveyor_command = ["./foreman/run_surveyor.sh", "survey_all", "--accession", accession_code]
    subprocess.check_call(surveyor_command)
def run_full_pipeline(accession_code):
    """Survey the accession, then keep running queued jobs until none remain."""
    survey_accession(accession_code)

    while True:
        pending_job = get_job_to_run()
        if not pending_job:
            break
        run_job(pending_job)
# Script entry point: survey the given accession and then run the resulting
# downloader/processor jobs locally until the queue is drained.
if __name__ == "__main__":
    args = parse_args()
    run_full_pipeline(args.accession_code)
<|code_end|>
workers/data_refinery_workers/processors/illumina.py
<|code_start|>import csv
import multiprocessing
import os
import re
import subprocess
import tempfile
from typing import Dict
from django.utils import timezone
import numpy as np
import pandas as pd
from data_refinery_common.enums import PipelineEnum, ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Pipeline,
ProcessorJob,
ProcessorJobOriginalFileAssociation,
Sample,
SampleAnnotation,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
logger = get_and_configure_logger(__name__)
def _prepare_files(job_context: Dict) -> Dict:
    """Adds the keys "sanitized_file_path" and "output_file_path" to
    job_context so everything is prepared for processing.
    """
    # All files for the job live in a per-job working directory.
    job_context["work_dir"] = (
        LOCAL_ROOT_DIR + "/" + "processor_job_" + str(job_context["job_id"]) + "/"
    )
    os.makedirs(job_context["work_dir"], exist_ok=True)

    original_file = job_context["original_files"][0]
    input_path = original_file.absolute_file_path
    job_context["input_file_path"] = input_path

    # Fail fast on unexpected formats rather than blowing up later in the
    # R pipeline with a confusing error.
    if not input_path.endswith(".txt"):
        logger.error(
            "Input file doesn't have a suffix we recognize, probably an invalid format",
            input_file=original_file.absolute_file_path,
        )
        job_context["job"].failure_reason = "Couldn't recognize the input file format"
        job_context["success"] = False
        job_context["job"].no_retry = True
        return job_context

    basename = input_path.split("/")[-1]
    job_context["sanitized_file_path"] = job_context["work_dir"] + basename + ".sanitized"
    job_context["output_file_path"] = job_context["work_dir"] + basename.replace(".txt", ".PCL")

    return job_context
def _detect_encoding(job_context: Dict) -> Dict:
"""Some Illumina files are not encoded using utf-8, so we need to use
`file` to detect their encoding"""
try:
encoding = subprocess.check_output(
["file", "--brief", "--mime-encoding", job_context["input_file_path"],],
encoding="utf-8",
).strip()
except subprocess.CalledProcessError as e:
logger.exception(
"Failed to detect the input file's encoding",
processor_job=job_context["job_id"],
input_file=job_context["input_file_path"],
)
job_context["job"].failure_reason = "Failed to detect the input file's encoding"
job_context["success"] = False
job_context["job"].no_retry = True
if encoding not in ["us-ascii", "utf-8", "iso-8859-1"]:
logger.error(
"Input file has unrecognized encoding",
input_file=job_context["input_file_path"],
encoding=encoding,
)
job_context["job"].failure_reason = f"Input file has unrecognized encoding {encoding}"
job_context["success"] = False
job_context["job"].no_retry = True
job_context["encoding"] = encoding
return job_context
def _sanitize_input_file(job_context: Dict) -> Dict:
"""Remove all of the SOFT-specific extensions in the original file (see
https://www.ncbi.nlm.nih.gov/geo/info/soft.html) plus some extra things we
found that make R choke. Also, some files aren't utf-8 encoded, so we will
re-encode them into utf-8."""
with open(job_context["input_file_path"], "r", encoding=job_context["encoding"]) as file_input:
with open(job_context["sanitized_file_path"], "w", encoding="utf-8") as file_output:
for line in file_input:
HEADER_CHARS = ["#", "!", "^"]
stripped_line = line.strip()
if stripped_line == "":
continue
# Sometimes we have a quoted header, so we need to check both
is_header = stripped_line[0] in HEADER_CHARS or (
len(stripped_line) > 1
and stripped_line[0] in ["'", '"']
and stripped_line[1] in HEADER_CHARS
)
# There are some weird lines that start with accession
# codes that don't hold gene measurements
is_accession_line = stripped_line[0:3].upper() == "GSM"
if not is_header and not is_accession_line:
file_output.write(line)
wrote_a_line = True
if not wrote_a_line:
logger.error(
"Filtered every line out of the input file", input_file=job_context["input_file_path"]
)
job_context["job"].failure_reason = "No valid rows detected in the input file"
job_context["success"] = False
job_context["job"].no_retry = True
return job_context
def _convert_sanitized_to_tsv(job_context: Dict) -> Dict:
"""Now that we have removed all of the SOFT-specific extensions, we are left
with some kind of csv/tsv/ssv that may or may not have quoted strings. To
make things easier to parse in R, we will try to sniff these features and
output a uniform format for the R code to read"""
_, tmpfile = tempfile.mkstemp(
suffix=".txt", dir=os.path.dirname(job_context["sanitized_file_path"])
)
with open(job_context["sanitized_file_path"], "r") as file_input:
with open(tmpfile, "w") as file_output:
dialect = csv.Sniffer().sniff(file_input.read(16384), delimiters="\t ,")
file_input.seek(0)
reader = csv.reader(file_input, dialect=dialect)
writer = csv.writer(file_output, delimiter="\t")
reader_iter = iter(reader)
headers = next(reader)
first_content = next(reader)
# Sometimes input files have one less header than they have rows,
# which means that we should interpret the first column as ID_REF.
# The rest of our code expects this explicit header, though, so we
# will insert it here.
if len(headers) == len(first_content) - 1:
headers = ["ID_REF", *headers]
writer.writerow(headers)
writer.writerow(first_content)
for row in reader_iter:
writer.writerow(row)
os.rename(tmpfile, job_context["sanitized_file_path"])
return job_context
def _detect_columns(job_context: Dict) -> Dict:
    """Detect which columns match to which inputs.

    Related: https://github.com/AlexsLemonade/refinebio/issues/86#issuecomment-379308817

    We need to find:

        First column should be ID_REF or PROBE_ID and the type should be string.
        Detection Pval column
        Expression column (contains sample title and NOT 'BEAD')

    Header examples:

        ['ID_REF', 'LV-C&si-Control-1', 'Detection Pval',
        'LV-C&si-Control-2', 'Detection Pval', 'LV-C&si-Control-3', 'Detection
        Pval', 'LV-C&si-EZH2-1', 'Detection Pval', 'LV-C&si-EZH2-2', 'Detection
        Pval', 'LV-C&si-EZH2-3', 'Detection Pval', 'LV-EZH2&si-EZH2-1',
        'Detection Pval', 'LV-EZH2&si-EZH2-2', 'Detection Pval', 'LV-EZH2&si-
        EZH2-3', 'Detection Pval', 'LV-T350A&si-EZH2-1', 'Detection Pval', 'LV-
        T350A&si-EZH2-2', 'Detection Pval', 'LV-T350A&si-EZH2-3', 'Detection
        Pval']

    Adds the following keys to job_context:

        columnIds: the identifiers of columns which contain expression data
        probeId: which is the value of the column containing the probe identifiers.
        detectionPval: a string which identifies Pvalue columns
    """
    try:
        input_file = job_context["sanitized_file_path"]
        headers = None
        # Only the first (header) row of the TSV is needed here.
        with open(input_file, "r") as tsv_in:
            tsv_in = csv.reader(tsv_in, delimiter="\t")
            for row in tsv_in:
                headers = row
                break

        # Ex GSE45331_non-normalized.txt
        predicted_header = 0
        # Some files start with blank columns, so let's skip past those
        while headers[predicted_header] == "":
            predicted_header += 1

        # A leading TargetID column precedes the probe ID column; skip it.
        if headers[predicted_header].upper() in ["TARGETID", "TARGET_ID"]:
            predicted_header += 1

        # First the probe ID column
        if headers[predicted_header].upper().strip() == "ILLUMICODE":
            logger.error(
                "Tried to process a beadTypeFile.txt, which we don't support",
                input_file=job_context["sanitized_file_path"],
            )
            job_context["job"].failure_reason = "Unsupported filetype 'beadTypeFile.txt'"
            job_context["success"] = False
            job_context["job"].no_retry = True
            return job_context
        elif headers[predicted_header].upper().strip() not in [
            "ID_REF",
            "PROBE_ID",
            "IDREF",
            "PROBEID",
            "REF_ID",
            "REFID",
            "IDPROBE",
            "ID_PROBE",
        ]:
            job_context["job"].failure_reason = (
                "Could not find any ID column in headers "
                + str(headers)
                + " for file "
                + job_context["sanitized_file_path"]
            )
            job_context["success"] = False
            return job_context
        else:
            job_context["probeId"] = headers[predicted_header]

        # Then check to make sure a detection pvalue exists, which is always(?) some form of
        # 'Detection Pval'
        for header in headers:
            # check if header contains something like "detection pval" or "detection_pval"
            pvalue_header = re.search(r"(detection)([\W_]?)(pval\w*)", header, re.IGNORECASE)
            if pvalue_header:
                break
        else:
            # for/else: no header matched the p-value pattern.
            job_context["job"].failure_reason = "Could not detect PValue column!"
            job_context["success"] = False
            job_context["job"].no_retry = True
            return job_context

        # Then, finally, create an absolutely bonkers regular expression
        # which will explicitly hit on any sample which contains a sample
        # ID _and_ ignores the magical word 'BEAD', etc. Great!
        column_ids = set()
        for sample in job_context["samples"]:
            # Offsets are 1-based because they are passed to the R script.
            for offset, header in enumerate(headers, start=1):
                if sample.title == header:
                    column_ids.add(offset)
                    continue

                # Sometimes the title might actually be in the description field.
                # To find this, look in all the related SampleAnnotations.
                # Since there are multiple annotations, we need to break early before continuing.
                # Related: https://github.com/AlexsLemonade/refinebio/issues/499
                continue_me = False
                for annotation in sample.sampleannotation_set.filter(is_ccdl=False):
                    try:
                        if annotation.data.get("description", "")[0] == header:
                            column_ids.add(offset)
                            continue_me = True
                            break
                    except Exception:
                        # Annotation data may not have the expected shape;
                        # just try the next annotation.
                        pass

                if continue_me:
                    # Treat the header as the real title, as we will need it later.
                    sample.title = header
                    sample.save()
                    continue

                if header.upper().replace(" ", "_") == "RAW_VALUE":
                    column_ids.add(offset)
                    continue

                if (
                    header != ""
                    and (
                        sample.title.upper() in header.upper()
                        or sample.title.upper().endswith("_" + header.upper())
                    )
                    and "BEAD" not in header.upper()
                    and "NARRAYS" not in header.upper()
                    and "ARRAY_STDEV" not in header.upper()
                    and "PVAL" not in header.upper().replace(" ", "").replace("_", "")
                ):
                    column_ids.add(offset)
                    continue

        # Also accept any explicit AVG_Signal columns regardless of titles.
        for offset, header in enumerate(headers, start=1):
            if "AVG_Signal" in header:
                column_ids.add(offset)
                continue

        if len(column_ids) == 0:
            job_context[
                "job"
            ].failure_reason = f"could not find columns ids in {job_context['sanitized_file_path']}"
            job_context["success"] = False
            logger.error("Could not find columns ids in " + job_context["sanitized_file_path"])
            job_context["job"].no_retry = True
            return job_context

        job_context["columnIds"] = ",".join(map(lambda id: str(id), column_ids))
    except Exception as e:
        job_context[
            "job"
        ].failure_reason = (
            f"failure to extract columns in {job_context['sanitized_file_path']}: {e}"
        )
        job_context["success"] = False
        logger.exception(
            "Failed to extract columns in " + job_context["sanitized_file_path"], exception=str(e)
        )
        job_context["job"].no_retry = True
        return job_context

    return job_context
def _detect_platform(job_context: Dict) -> Dict:
    """
    Determine the platform/database to process this sample with.

    They often provide something like "V2" or "V 2", but we don't trust them so we detect it ourselves.

    Related: https://github.com/AlexsLemonade/refinebio/issues/232
    """
    # Candidate Bioconductor annotation packages per organism.
    all_databases = {
        "HOMO_SAPIENS": [
            "illuminaHumanv1",
            "illuminaHumanv2",
            "illuminaHumanv3",
            "illuminaHumanv4",
        ],
        "MUS_MUSCULUS": ["illuminaMousev1", "illuminaMousev1p1", "illuminaMousev2",],
        "RATTUS_NORVEGICUS": ["illuminaRatv1"],
    }
    sample0 = job_context["samples"][0]
    databases = all_databases[sample0.organism.name]

    # Loop over all of the possible platforms and find the one with the best match.
    highest = 0.0
    high_mapped_percent = 0.0
    high_db = None
    for platform in databases:
        try:
            result = subprocess.check_output(
                [
                    "/usr/bin/Rscript",
                    "--vanilla",
                    "/home/user/data_refinery_workers/processors/detect_database.R",
                    "--platform",
                    platform,
                    "--inputFile",
                    job_context["sanitized_file_path"],
                    "--column",
                    # R strips column names, so we have to too
                    job_context["probeId"].strip(),
                ]
            )

            # NOTE(review): detect_database.R appears to print the detection
            # percentage on its first output line and the mapped percentage
            # on the second — confirm against the script.
            results = result.decode().split("\n")
            cleaned_result = float(results[0].strip())

            if cleaned_result > highest:
                highest = cleaned_result
                high_db = platform
                high_mapped_percent = float(results[1].strip())

        except Exception as e:
            # A failed detection for one platform shouldn't prevent us from
            # trying the remaining candidates.
            logger.exception(e, processor_job_id=job_context["job"].id)
            continue

    # Record our sample detection outputs for every sample.
    for sample in job_context["samples"]:
        sa = SampleAnnotation()
        sa.sample = sample
        sa.is_ccdl = True
        sa.data = {
            "detected_platform": high_db,
            "detection_percentage": highest,
            "mapped_percentage": high_mapped_percent,
        }
        sa.save()

    # If the match is over 75%, record this and process it on that platform.
    if high_mapped_percent > 75.0:
        job_context["platform"] = high_db
    # The match percentage is too low - send this to the no-opper instead.
    else:
        logger.info("Match percentage too low, NO_OP'ing and aborting.", job=job_context["job_id"])
        processor_job = ProcessorJob()
        processor_job.downloader_job = job_context["job"].downloader_job
        processor_job.pipeline_applied = "NO_OP"
        processor_job.ram_amount = job_context["job"].ram_amount
        processor_job.save()

        assoc = ProcessorJobOriginalFileAssociation()
        assoc.original_file = job_context["original_files"][0]
        assoc.processor_job = processor_job
        assoc.save()

        try:
            send_job(ProcessorPipeline.NO_OP, processor_job)
        except Exception as e:
            # Batch dispatch error, likely during local test.
            logger.error(e, job=processor_job)

        # Tell run_pipeline to stop without marking the job as failed.
        job_context["abort"] = True

    return job_context
def _run_illumina(job_context: Dict) -> Dict:
    """Run illumina.R to convert the sanitized TXT input into a PCL file.

    Expects a job_context which has been pre-populated with the input and
    output paths, the detected platform, and the probe/expression column
    identifiers that the R script needs. On success, records the command
    line and start/end timestamps; on any error, records a failure_reason
    and marks the job unsuccessful.
    """
    try:
        job_context["time_start"] = timezone.now()

        command = [
            "/usr/bin/Rscript",
            "--vanilla",
            "/home/user/data_refinery_workers/processors/illumina.R",
            "--probeId",
            # R strips column names, so we have to too
            job_context["probeId"].strip(),
            "--expression",
            job_context["columnIds"],
            "--platform",
            job_context["platform"],
            "--inputFile",
            job_context["sanitized_file_path"],
            "--outputFile",
            job_context["output_file_path"],
            "--cores",
            str(multiprocessing.cpu_count()),
        ]
        script_output = subprocess.check_output(command).decode()
        logger.debug(f"illumina.R ran successfully with output '{script_output}'")

        job_context["formatted_command"] = " ".join(command)
        job_context["time_end"] = timezone.now()

    except subprocess.CalledProcessError as e:
        error_template = (
            "Encountered error in R code while running illumina.R"
            " pipeline during processing of {0}: {1}"
        )
        error_message = error_template.format(job_context["sanitized_file_path"], str(e))
        logger.error(error_message, processor_job=job_context["job_id"])
        logger.debug(f"illumina.R failed with output '{e.output.decode()}'")
        job_context["job"].failure_reason = error_message
        job_context["success"] = False

    except Exception as e:
        logger.exception(
            "Exception raised while running illumina.R", processor_job=job_context["job_id"]
        )
        job_context["job"].failure_reason = f"Exception raised while running illumina.R: '{e}'"
        job_context["success"] = False

    return job_context
def _get_sample_for_column(column: str, job_context: Dict) -> Sample:
    """Find the Sample that an expression-column header refers to, or None.

    Tries several naming conventions in order: an exact title match, a
    title ending in "_<column>", the "<TITLE>(.AVG)?_Signal" convention,
    and finally a lookup through each sample's recorded GEO
    "<name>.Detection Pval" column names.
    """
    samples = job_context["samples"]

    # 1) The column header is exactly the sample title.
    try:
        return samples.get(title=column)
    except Sample.DoesNotExist:
        pass

    # 2) Titles that share a common prefix, i.e. "<prefix>_<column>".
    try:
        return samples.get(title__iendswith="_" + column)
    except Sample.DoesNotExist:
        pass

    # 3) The "<SAMPLE_TITLE>(.AVG)?_Signal" convention.
    title_match = re.match(r"(?P<title>.*?)(\.AVG)?_Signal", column)
    if title_match is not None:
        try:
            return samples.get(title=title_match.group("title"))
        except Sample.DoesNotExist:
            pass

    # 4) A sample whose recorded GEO columns include the detection p-value
    #    column matching this "<name>.AVG_Signal" header.
    name_match = re.match(r"(?P<name>.*)\.AVG_Signal", column)
    if name_match is not None:
        try:
            return samples.get(
                sampleannotation__data__geo_columns__contains="{}.Detection Pval".format(
                    name_match.group("name")
                )
            )
        except Sample.DoesNotExist:
            pass

    return None
def _create_result_objects(job_context: Dict) -> Dict:
    """Create the ComputationalResult objects after a Scan run is complete.

    Splits the single PCL output of illumina.R into one single-column,
    smashable TSV ComputedFile per sample and associates each file with its
    Sample and with the shared ComputationalResult.
    """
    result = ComputationalResult()
    result.commands.append(job_context["formatted_command"])
    result.is_ccdl = True
    result.is_public = True
    result.time_start = job_context["time_start"]
    result.time_end = job_context["time_end"]
    try:
        processor_key = "ILLUMINA_SCAN"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        return utils.handle_processor_exception(job_context, processor_key, e)

    result.save()
    job_context["pipeline"].steps.append(result.id)

    # Split the result into smashable subfiles
    big_tsv = job_context["output_file_path"]
    data = pd.read_csv(big_tsv, sep="\t", header=0, index_col=0)
    individual_files = []
    # One single-column frame per sample column.
    frames = np.split(data, len(data.columns), axis=1)
    for frame in frames:
        # Strip characters that are unsafe in filenames from the column name.
        filename = (
            frame.columns.values[0].replace("&", "").replace("*", "").replace(";", "") + ".tsv"
        )
        frame_path = job_context["work_dir"] + filename
        frame.to_csv(frame_path, sep="\t", encoding="utf-8")

        # This needs to be the same as the ones in the job context!
        sample = _get_sample_for_column(frame.columns.values[0], job_context)
        if sample is None:
            job_context["job"].failure_reason = (
                "Could not find sample for column "
                + frame.columns.values[0]
                + " while splitting Illumina file "
                + big_tsv
            )
            job_context["success"] = False
            job_context["job"].no_retry = True
            return job_context

        computed_file = ComputedFile()
        computed_file.absolute_file_path = frame_path
        computed_file.filename = frame_path.split("/")[-1]
        computed_file.result = result
        computed_file.is_smashable = True
        computed_file.is_qc = False
        computed_file.is_public = True
        computed_file.calculate_sha1()
        computed_file.calculate_size()
        computed_file.save()
        job_context["computed_files"].append(computed_file)

        SampleResultAssociation.objects.get_or_create(sample=sample, result=result)
        SampleComputedFileAssociation.objects.get_or_create(
            sample=sample, computed_file=computed_file
        )
        individual_files.append(computed_file)

    logger.debug("Created %s", result)
    job_context["success"] = True

    job_context["individual_files"] = individual_files
    job_context["result"] = result

    return job_context
def illumina_to_pcl(job_id: int, cleanup=None) -> None:
    """Run the Illumina processor pipeline for the given ProcessorJob id.

    `cleanup` is only overridden by the tests so original files are kept
    around and don't have to be re-downloaded on every run.
    """
    job_context = {
        "job_id": job_id,
        "pipeline": Pipeline(name=PipelineEnum.ILLUMINA.value),
    }

    # When running the tests, don't clean up original files so we don't have to
    # keep downloading them.
    if cleanup is not None:
        job_context["cleanup"] = cleanup

    pipeline_steps = [
        utils.start_job,
        _prepare_files,
        _detect_encoding,
        _sanitize_input_file,
        _convert_sanitized_to_tsv,
        _detect_columns,
        _detect_platform,
        _run_illumina,
        _create_result_objects,
        utils.end_job,
    ]
    return utils.run_pipeline(job_context, pipeline_steps)
<|code_end|>
|
`/search/` responses contain incorrect information
### Context
I found what was causing https://github.com/AlexsLemonade/refinebio-frontend/issues/955
### Problem or idea
Sometimes results returned from `/search/` contain stale or incorrect data, which causes misleading text like `No Species` to be displayed on the search page.
An example is GSE112517:
Hitting https://api.refine.bio/v1/search/?search=accession_code:GSE112517 gives
```
...
"organism_names": [],
...
```
but hitting https://api.refine.bio/v1/experiments/GSE112517/ gives
```
...
"organism_names": ["HOMO_SAPIENS"],
...
```
### Solution or next step
My first instinct is that the Elasticsearch database is out of sync with our database. One easy way to test this would be to check whether or not manually refreshing Elasticsearch solves the problem for this experiment.
I also think that this _could_ be happening because `organism_names` is a computed property, which might cause some weirdness. Surveying the experiment locally and seeing what a local instance of Elasticsearch returns would be a good way to test if this is true.
| api/data_refinery_api/views/experiment_document.py
<|code_start|>##
# Experiment document views
##
from django.http import QueryDict
from django.utils.decorators import method_decorator
from rest_framework import serializers
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_RANGE,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_IN,
)
from django_elasticsearch_dsl_drf.filter_backends import (
CompoundSearchFilterBackend,
DefaultOrderingFilterBackend,
FacetedSearchFilterBackend,
FilteringFilterBackend,
OrderingFilterBackend,
)
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from elasticsearch_dsl import TermsFacet
from six import iteritems
from data_refinery_api.exceptions import InvalidFilters
from data_refinery_common.models.documents import ExperimentDocument
class FormlessBrowsableAPIRenderer(BrowsableAPIRenderer):
    """A BrowsableAPIRenderer that never renders an HTML form for any method.

    Used by ExperimentDocumentView: generating the form for its POST
    handler raises an error, so form rendering is disabled wholesale.
    """

    def show_form_for_method(self, view, method, request, instance):
        # Suppress the form unconditionally, whatever the view or method.
        return False
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    def aggregate(self, request, queryset, view):
        """Extends FacetedSearchFilterBackend to add additional metrics to each bucket.
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19

        We have the downloadable sample accession codes indexed for each experiment.
        The cardinality metric returns the number of unique samples for each bucket.
        However it is only an approximation:
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        We use the highest possible precision threshold, but this might increase
        the amount of memory used.
        """
        facets = self.construct_facets(request, view)
        for field, facet in iteritems(facets):
            agg = facet["facet"].get_aggregation()
            # Attach a cardinality sub-aggregation so every facet bucket also
            # reports how many distinct downloadable samples it covers.
            queryset.aggs.bucket(field, agg).metric(
                "total_samples",
                "cardinality",
                field="downloadable_samples",
                precision_threshold=40000,
            )
        return queryset
class POSTFilteringFilterBackend(FilteringFilterBackend):
    """Adapts FilteringFilterBackend to take filter queries from POST requests.

    FilteringFilterBackend only reads `request.query_params`, so the JSON
    request body is converted into a QueryDict and handed to the parent
    backend via a mock request object.
    """

    class MockRequest:
        """A mock request object to give to FilteringFilterBackend that
        only has a query_params field.

        The purpose of this class is to convert the request data to a form
        that can be understood by FilteringFilterBackend.
        """

        def add_to_params(self, key, value):
            """Add a single value to query_params, converting to string if necessary.

            Raises InvalidFilters for values that cannot be expressed as a
            query parameter (None, nested lists, dicts).
            """
            # json.loads produces exact str/int/bool types, so isinstance is
            # equivalent to the old `type(value) ==` checks here while being
            # the idiomatic form. Note bool is a subclass of int.
            if isinstance(value, str):
                self.query_params.appendlist(key, value)
            elif isinstance(value, (int, bool)):
                self.query_params.appendlist(key, str(value))
            else:
                # We shouldn't be filtering on Null, lists, or dicts
                raise InvalidFilters(
                    message="Invalid type {} for filter value {}".format(type(value), str(value))
                )

        def __init__(self, request):
            self.query_params = QueryDict(mutable=True)

            for key, value in request.data.items():
                if isinstance(value, list):
                    # A top-level list becomes a repeated query parameter.
                    for item in value:
                        self.add_to_params(key, item)
                else:
                    self.add_to_params(key, value)

    def get_filter_query_params(self, request, view):
        """Override get_filter_query_params to insert our own in POST requests."""
        if request.method != "POST":
            return {}

        return super(POSTFilteringFilterBackend, self).get_filter_query_params(
            POSTFilteringFilterBackend.MockRequest(request), view
        )

    def get_schema_fields(self, view):
        """Return no schema fields since we don't use any query parameters.

        This has to be defined, otherwise we get FilteringFilterBackend's
        schema fields instead, which causes an error if both of them are used
        in the same view."""
        return []
class ExperimentDocumentSerializer(DocumentSerializer):
    """Serializer for the Experiment document.

    Exposes the denormalized experiment fields stored in the ElasticSearch
    index; see ExperimentDocument for the index mapping.
    """

    class Meta(object):
        """Meta options."""

        document = ExperimentDocument
        # NOTE: must stay in sync with the fields declared on ExperimentDocument.
        fields = (
            "id",
            "title",
            "publication_title",
            "description",
            "technology",
            "accession_code",
            "alternate_accession_code",
            "submitter_institution",
            "has_publication",
            "publication_doi",
            "publication_authors",
            "sample_metadata_fields",
            "platform_names",
            "platform_accession_codes",
            "organism_names",
            "pubmed_id",
            "num_total_samples",
            "num_processed_samples",
            "num_downloadable_samples",
            "source_first_published",
        )
        # Search results are read-only; writes go through the ORM and index
        # rebuilds, never through this serializer.
        read_only_fields = fields
@method_decorator(
    name="list",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="accession_code",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                # FIX: the example previously showed technology values
                # (copy-paste from the `technology` parameter below).
                description="Allows filtering the results by accession code, can have multiple values. Eg: `?accession_code=GSE12345&accession_code=GSE12346`",
            ),
            openapi.Parameter(
                name="technology",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
            ),
            openapi.Parameter(
                name="has_publication",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Filter the results that have associated publications with `?has_publication=true`",
            ),
            openapi.Parameter(
                name="platform",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by platform, this parameter can have multiple values.",
            ),
            openapi.Parameter(
                name="organism",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Allows filtering the results by organism, this parameter can have multiple values.",
            ),
            openapi.Parameter(
                name="num_processed_samples",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_NUMBER,
                description="Use ElasticSearch queries to specify the number of processed samples of the results",
            ),
        ],
        operation_description="""
Use this endpoint to search among the experiments.

This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)

There's an additional field in the response named `facets` that contain stats on the number of results per filter type.

Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```

This endpoint also accepts POST requests for larger queries. Any of the filters
accepted as query parameters are also accepted in a JSON object in the request
body.

Example Requests (from our tests):
```python
import requests
import json

headers = {
    'Content-Type': 'application/json',
}

# Basic filter
search = {"accession_code": "GSE123"}
requests.post(host + '/v1/search/', json.dumps(search), headers=headers)

# __in filter
search = {"accession_code__in": ["GSE123"]}
requests.post(host + '/v1/search/', json.dumps(search), headers=headers)

# numeric filter
search = {"num_downloadable_samples__gt": 0}
requests.post(host + '/v1/search/', json.dumps(search), headers=headers)
```
""",
    ),
)
class ExperimentDocumentView(DocumentViewSet):
    """ElasticSearch powered experiment search."""

    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination
    renderer_classes = [JSONRenderer, FormlessBrowsableAPIRenderer]

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        POSTFilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended,
    ]

    # Primitive
    lookup_field = "id"

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        "title": {"boost": 10},
        "publication_authors": {"boost": 8},  # "People will search themselves"
        "sample_keywords": {"boost": 7},
        "publication_title": {"boost": 5},
        "submitter_institution": {"boost": 3},
        "description": {"boost": 2},
        "accession_code": None,
        "alternate_accession_code": None,
        "publication_doi": None,
        "pubmed_id": None,
        "sample_metadata_fields": None,
        "platform_names": None,
    }

    # Define filtering fields
    filter_fields = {
        "id": {"field": "_id", "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN]},
        "technology": "technology",
        "has_publication": "has_publication",
        "accession_code": "accession_code",
        "alternate_accession_code": "alternate_accession_code",
        "platform": "platform_accession_codes",
        "organism": "organism_names.raw",
        "num_processed_samples": {
            "field": "num_processed_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
        "num_downloadable_samples": {
            "field": "num_downloadable_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
        "sample_keywords": "sample_keywords",
    }

    # Define ordering fields
    ordering_fields = {
        "id": "id",
        "title": "title.raw",
        "description": "description.raw",
        "num_total_samples": "num_total_samples",
        "num_downloadable_samples": "num_downloadable_samples",
        "source_first_published": "source_first_published",
    }

    # Specify default ordering
    ordering = (
        "_score",
        "-num_total_samples",
        "id",
        "title",
        "description",
        "-source_first_published",
    )

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        "technology": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": True,  # These are enabled by default, which is more expensive but more simple.
        },
        "organism_names": {
            "field": "organism_names.raw",
            "facet": TermsFacet,
            "enabled": True,
            "options": {"size": 999999},
        },
        "platform_accession_codes": {
            "field": "platform_accession_codes",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
            "options": {"size": 999999},
        },
        "has_publication": {
            "field": "has_publication",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        "technology_global": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
        "organism_names_global": {
            "field": "organism_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        "platform_names_global": {
            "field": "platform_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        "has_publication_global": {
            # FIX: this previously aggregated on "platform_names", a
            # copy-paste error from platform_names_global above.
            "field": "has_publication",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
    }
    faceted_search_param = "facet"

    # Define a separate post method so that we can hide it in the
    # documentation. Otherwise, the auto-generated documentation for the post
    # method is incorrect
    @swagger_auto_schema(auto_schema=None)
    def post(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)

    def list(self, request, *args, **kwargs):
        # FIX: args/kwargs were previously forwarded positionally
        # (`.list(request, args, kwargs)`) instead of being unpacked.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data["facets"] = self.transform_es_facets(response.data["facets"])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:

        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }

        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        for field, facet in iteritems(facets):
            filter_group = {}
            for bucket in facet["buckets"]:
                # Boolean facets expose their key as "key_as_string"
                # ("true"/"false") rather than a raw 0/1 key.
                if field == "has_publication":
                    filter_group[bucket["key_as_string"]] = bucket["total_samples"]["value"]
                else:
                    filter_group[bucket["key"]] = bucket["total_samples"]["value"]
            result[field] = filter_group
        return result
<|code_end|>
common/data_refinery_common/models/documents.py
<|code_start|>from django_elasticsearch_dsl import Document, Index, fields
from elasticsearch_dsl import analyzer
from elasticsearch_dsl.analysis import token_filter
from data_refinery_common.models.experiment import Experiment
# Single-shard index; max_result_window is raised far above the ES default
# (10k) so deep pagination over all experiments works.
experiment_index = Index("experiments")
experiment_index.settings(number_of_shards=1, number_of_replicas=0, max_result_window=9999999)

# via https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.2/advanced_usage_examples.html?highlight=ngram#id8
# via https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/issues/110
edge_ngram_completion_filter = token_filter(
    "edge_ngram_completion_filter", type="edge_ngram", min_gram=3, max_gram=12
)

# Free-text analyzer: strips HTML, lowercases, removes stop words, stems
# (snowball) and emits edge n-grams for prefix matching.
html_strip = analyzer(
    "html_strip",
    tokenizer="whitespace",
    filter=[edge_ngram_completion_filter, "standard", "lowercase", "stop", "snowball"],
    char_filter=["html_strip"],
)

# Like html_strip but without edge n-grams or stemming (whole-token match).
html_strip_no_ngram = analyzer(
    "html_strip_no_ngram",
    tokenizer="standard",
    filter=["standard", "lowercase", "stop"],
    char_filter=["html_strip"],
)

# Like html_strip but keeps stop words and skips stemming/n-grams.
html_strip_no_stop = analyzer(
    "html_strip_no_stop",
    tokenizer="whitespace",
    filter=["standard", "lowercase"],
    char_filter=["html_strip"],
)

# Treats the entire value as a single keyword token (exact matching).
standard_keyword = analyzer("standard_keyword", tokenizer="keyword", filter=[],)
@experiment_index.doc_type
class ExperimentDocument(Document):
    """Our Experiment ElasticSearch Document, which corresponds to our
    Experiment model (one document per experiment).
    """

    # Keyword Fields
    # Each text field also exposes a "raw" sub-field for exact
    # filtering/sorting on the unanalyzed value.
    title = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    publication_title = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    description = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    publication_authors = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    technology = fields.TextField(
        analyzer=html_strip_no_stop, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    organism_names = fields.TextField(
        analyzer=html_strip_no_ngram, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    platform_names = fields.TextField(
        analyzer=standard_keyword, fielddata=True, fields={"raw": fields.TextField()}
    )
    platform_accession_codes = fields.TextField(
        analyzer=standard_keyword, fielddata=True, fields={"raw": fields.TextField()}
    )

    # Basic Fields
    accession_code = fields.KeywordField()
    alternate_accession_code = fields.KeywordField()
    submitter_institution = fields.TextField()
    publication_doi = fields.TextField()
    has_publication = fields.BooleanField()
    sample_metadata_fields = fields.TextField()
    pubmed_id = fields.TextField()
    num_total_samples = fields.IntegerField()
    num_processed_samples = fields.IntegerField()
    num_downloadable_samples = fields.IntegerField()
    source_first_published = fields.DateField()

    # Index all downloadable samples as keywords so that we can calculate unique counts on the facets
    downloadable_samples = fields.ListField(fields.KeywordField())

    # Index our sample keywords so that we can use them for better search
    sample_keywords = fields.ListField(fields.KeywordField())

    class Django:
        # The ORM model this document mirrors; only "id" comes straight from
        # the model — the rest are declared explicitly above.
        model = Experiment
        parallel_indexing = True
        queryset_pagination = 3000
        fields = [
            "id",
        ]

    def get_queryset(self):
        """Override the default queryset with a deterministic ordering so
        parallel/paginated indexing is stable."""
        return super(ExperimentDocument, self).get_queryset().order_by("id")
<|code_end|>
common/data_refinery_common/models/experiment.py
<|code_start|>from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models import Count
from django.db.models.expressions import Q
from django.utils import timezone
from data_refinery_common.models.managers import ProcessedPublicObjectsManager, PublicObjectsManager
class Experiment(models.Model):
    """An Experiment or Study."""

    class Meta:
        db_table = "experiments"
        base_manager_name = "public_objects"

    def __str__(self):
        return "Experiment: " + self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()

    # Relations
    samples = models.ManyToManyField("Sample", through="ExperimentSampleAssociation")
    organisms = models.ManyToManyField("Organism", through="ExperimentOrganismAssociation")

    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)

    # Historical Properties
    source_database = models.CharField(max_length=32)  # "ArrayExpress", "SRA", "GEO"
    source_url = models.TextField()

    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = models.JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)

    # Cached Computed Properties (denormalized so search/indexing doesn't
    # have to recompute them per request)
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_keywords = ArrayField(models.TextField(), default=list)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps and derive the alternate accession code."""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time

        # GEO and ArrayExpress mirror each other's experiments, so when only
        # one accession is known the other can be derived mechanically.
        if self.accession_code and not self.alternate_accession_code:
            if self.accession_code.startswith("GSE"):
                self.alternate_accession_code = "E-GEOD-" + self.accession_code[3:]
            elif self.accession_code.startswith("E-GEOD-"):
                self.alternate_accession_code = "GSE" + self.accession_code[7:]

        return super(Experiment, self).save(*args, **kwargs)

    def update_num_samples(self):
        """Update our cached sample-count values and save."""
        aggregates = self.samples.aggregate(
            num_total_samples=Count("id"),
            num_processed_samples=Count("id", filter=Q(is_processed=True)),
            # "Downloadable" additionally requires a QN target for the
            # sample's organism.
            num_downloadable_samples=Count(
                "id", filter=Q(is_processed=True, organism__qn_target__isnull=False)
            ),
        )
        self.num_total_samples = aggregates["num_total_samples"]
        self.num_processed_samples = aggregates["num_processed_samples"]
        self.num_downloadable_samples = aggregates["num_downloadable_samples"]
        self.save()

    def to_metadata_dict(self):
        """Render this Experiment as a dict of metadata."""
        metadata = {}
        metadata["title"] = self.title
        metadata["accession_code"] = self.accession_code
        metadata["organisms"] = list(self.organisms.all().values_list("name", flat=True))
        metadata["sample_accession_codes"] = list(
            self.samples.all().values_list("accession_code", flat=True)
        )
        metadata["description"] = self.description
        metadata["protocol_description"] = self.protocol_description
        metadata["technology"] = self.technology
        metadata["submitter_institution"] = self.submitter_institution
        metadata["has_publication"] = self.has_publication
        metadata["publication_title"] = self.publication_title
        metadata["publication_doi"] = self.publication_doi
        metadata["pubmed_id"] = self.pubmed_id

        # Dates serialize as ISO-8601-style strings; "" when unknown.
        if self.source_first_published:
            metadata["source_first_published"] = self.source_first_published.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_first_published"] = ""
        if self.source_last_modified:
            metadata["source_last_modified"] = self.source_last_modified.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_last_modified"] = ""

        return metadata

    def get_sample_keywords(self):
        """Get the human-readable name of all of the keywords that are defined
        on at least one sample.
        """
        keywords = set()
        for sample in self.samples.all():
            keywords |= set(sample.keywords.values_list("name__human_readable_name", flat=True))
        return list(keywords)

    def get_sample_metadata_fields(self):
        """Get all metadata fields that are non-empty for at least one sample in the experiment.

        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        possible_fields = [
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
        ]

        # Materialize once so each field check reuses the same result set.
        samples = list(self.samples.all())
        # A field counts as present if any sample has a non-None, non-empty value.
        return [
            field
            for field in possible_fields
            if any(getattr(sample, field) not in (None, "") for sample in samples)
        ]

    def update_sample_keywords(self):
        """Refresh the cached sample_keywords value (does not save)."""
        self.sample_keywords = self.get_sample_keywords()

    def update_sample_metadata_fields(self):
        """Refresh the cached sample_metadata_fields value (does not save)."""
        self.sample_metadata_fields = self.get_sample_metadata_fields()

    def update_platform_names(self):
        """Refresh the cached platform name/accession values (does not save)."""
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()

    def get_sample_technologies(self):
        """Get a list of unique technologies for all of the associated samples."""
        return list({sample.technology for sample in self.samples.all()})

    def get_platform_names(self):
        """Get a list of unique platform names for all of the associated samples."""
        return list({sample.platform_name for sample in self.samples.all()})

    def get_platform_accession_codes(self):
        """Get a list of unique platform accession codes for all of the associated samples."""
        return list({sample.platform_accession_code for sample in self.samples.all()})

    @property
    def platforms(self):
        """Returns a list of unique platform names of the related samples."""
        return list({sample.platform_name for sample in self.samples.all()})

    @property
    def pretty_platforms(self):
        """Returns a prettified list of related platforms."""
        return list({sample.pretty_platform for sample in self.samples.all()})

    @property
    def processed_samples(self):
        """Accession codes of all processed samples in this experiment."""
        return [sample.accession_code for sample in self.samples.all() if sample.is_processed]

    @property
    def organism_names(self):
        """Get a list of unique organism names that have at least one downloadable sample."""
        result = (
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False)
            .values_list("organism__name", flat=True)
            .distinct()
        )
        return list(result)

    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.

        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False).values_list(
                "accession_code", flat=True
            )
        )
<|code_end|>
| api/data_refinery_api/views/experiment_document.py
<|code_start|>##
# Experiment document views
##
from django.http import QueryDict
from django.utils.decorators import method_decorator
from rest_framework import serializers
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from django_elasticsearch_dsl_drf.constants import (
LOOKUP_FILTER_RANGE,
LOOKUP_QUERY_GT,
LOOKUP_QUERY_IN,
)
from django_elasticsearch_dsl_drf.filter_backends import (
CompoundSearchFilterBackend,
DefaultOrderingFilterBackend,
FacetedSearchFilterBackend,
FilteringFilterBackend,
OrderingFilterBackend,
)
from django_elasticsearch_dsl_drf.pagination import LimitOffsetPagination as ESLimitOffsetPagination
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from elasticsearch_dsl import TermsFacet
from six import iteritems
from data_refinery_api.exceptions import InvalidFilters
from data_refinery_common.models.documents import ExperimentDocument
class FormlessBrowsableAPIRenderer(BrowsableAPIRenderer):
    """A BrowsableAPIRenderer that never tries to display a form for any
    method.

    We use this in ExperimentDocumentView because otherwise we get an error
    when trying to generate the form for POST requests.
    """

    def show_form_for_method(self, view, method, request, instance):
        # Suppress the form unconditionally, whatever the view or method.
        return False
class FacetedSearchFilterBackendExtended(FacetedSearchFilterBackend):
    def aggregate(self, request, queryset, view):
        """Extends FacetedSearchFilterBackend to add additional metrics to each bucket.
        https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/master/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L19

        We have the downloadable sample accession codes indexed for each experiment.
        The cardinality metric returns the number of unique samples for each bucket.
        However it is only an approximation:
        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html#_counts_are_approximate
        We use the highest possible precision threshold, but this might increase
        the amount of memory used.
        """
        facets = self.construct_facets(request, view)
        for field, facet in iteritems(facets):
            agg = facet["facet"].get_aggregation()
            # Attach a cardinality sub-aggregation so every facet bucket also
            # reports how many distinct downloadable samples it covers.
            queryset.aggs.bucket(field, agg).metric(
                "total_samples",
                "cardinality",
                field="downloadable_samples",
                precision_threshold=40000,
            )
        return queryset
class POSTFilteringFilterBackend(FilteringFilterBackend):
    """Adapts FilteringFilterBackend to take queries from POST requests.

    FilteringFilterBackend only reads `request.query_params`, so the JSON
    request body is converted into a QueryDict via MockRequest.
    """

    class MockRequest:
        """A mock request object to give to FilteringFilterBackend that
        only has a query_params field.

        The purpose of this class is to convert the request data to a form
        that can be understood by FilteringFilterBackend.
        """

        def add_to_params(self, key, value):
            """Add to query params, converting to string if necessary.

            Raises InvalidFilters for values that cannot be expressed as a
            query parameter (None, nested lists, dicts).
            """
            if type(value) == str:
                self.query_params.appendlist(key, value)
            elif type(value) == int or type(value) == bool:
                self.query_params.appendlist(key, str(value))
            else:
                # We shouldn't be filtering on Null, lists, or dicts
                raise InvalidFilters(
                    message="Invalid type {} for filter value {}".format(type(value), str(value))
                )

        def __init__(self, request):
            self.query_params = QueryDict(mutable=True)

            for key, value in request.data.items():
                if type(value) == list:
                    # A top-level list becomes a repeated query parameter.
                    for item in value:
                        self.add_to_params(key, item)
                else:
                    self.add_to_params(key, value)

    def get_filter_query_params(self, request, view):
        """Override get_filter_query_params to insert our own in POST requests."""
        if request.method != "POST":
            return {}

        return super(POSTFilteringFilterBackend, self).get_filter_query_params(
            POSTFilteringFilterBackend.MockRequest(request), view
        )

    def get_schema_fields(self, view):
        """Return no schema fields since we don't use any query parameters.

        This has to be defined, otherwise we get FilteringFilterBackend's
        schema fields instead, which causes an error if both of them are used
        in the same view."""
        return []
class ExperimentDocumentSerializer(DocumentSerializer):
    """Serializer for the Experiment document.

    Exposes the denormalized experiment fields stored in the ElasticSearch
    index; see ExperimentDocument for the index mapping.
    """

    class Meta(object):
        """Meta options."""

        document = ExperimentDocument
        # NOTE: must stay in sync with the fields declared on ExperimentDocument.
        fields = (
            "id",
            "title",
            "publication_title",
            "description",
            "technology",
            "accession_code",
            "alternate_accession_code",
            "submitter_institution",
            "has_publication",
            "publication_doi",
            "publication_authors",
            "sample_metadata_fields",
            "platform_names",
            "platform_accession_codes",
            "organism_names",
            "downloadable_organism_names",
            "pubmed_id",
            "num_total_samples",
            "num_processed_samples",
            "num_downloadable_samples",
            "source_first_published",
        )
        # Search results are read-only; writes go through the ORM and index
        # rebuilds, never through this serializer.
        read_only_fields = fields
@method_decorator(
name="list",
decorator=swagger_auto_schema(
manual_parameters=[
openapi.Parameter(
name="accession_code",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Allows filtering the results by accession code, can have multiple values. Eg: `?accession_code=microarray&accession_code=rna-seq`",
),
openapi.Parameter(
name="technology",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Allows filtering the results by technology, can have multiple values. Eg: `?technology=microarray&technology=rna-seq`",
),
openapi.Parameter(
name="has_publication",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Filter the results that have associated publications with `?has_publication=true`",
),
openapi.Parameter(
name="platform",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Allows filtering the results by platform, this parameter can have multiple values.",
),
openapi.Parameter(
name="organism",
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
description="Allows filtering the results by organism, this parameter can have multiple values.",
),
openapi.Parameter(
name="num_processed_samples",
in_=openapi.IN_QUERY,
type=openapi.TYPE_NUMBER,
description="Use ElasticSearch queries to specify the number of processed samples of the results",
),
],
operation_description="""
Use this endpoint to search among the experiments.
This is powered by ElasticSearch, information regarding advanced usages of the
filters can be found in the [Django-ES-DSL-DRF docs](https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.1/filtering_usage_examples.html#filtering)
There's an additional field in the response named `facets` that contain stats on the number of results per filter type.
Example Requests:
```
?search=medulloblastoma
?id=1
?search=medulloblastoma&technology=microarray&has_publication=true
?ordering=source_first_published
```
This endpoint also accepts POST requests for larger queries. Any of the filters
accepted as query parameters are also accepted in a JSON object in the request
body.
Example Requests (from our tests):
```python
import requests
import json
headers = {
'Content-Type': 'application/json',
}
# Basic filter
search = {"accession_code": "GSE123"}
requests.post(host + '/v1/search/', json.dumps(search), headers=headers)
# __in filter
search = {"accession_code__in": ["GSE123"]}
requests.post(host + '/v1/search/', json.dumps(search), headers=headers)
# numeric filter
search = {"num_downloadable_samples__gt": 0}
requests.post(host + '/v1/search/', json.dumps(search), headers=headers)
```
""",
),
)
class ExperimentDocumentView(DocumentViewSet):
    """ElasticSearch powered experiment search.

    Exposes the ``experiments`` index through django-elasticsearch-dsl-drf with
    filtering, ordering, compound search and faceted aggregations. The raw ES
    facets are reshaped by ``transform_es_facets`` before being returned.
    """

    document = ExperimentDocument
    serializer_class = ExperimentDocumentSerializer
    pagination_class = ESLimitOffsetPagination
    renderer_classes = [JSONRenderer, FormlessBrowsableAPIRenderer]

    # Filter backends provide different functionality we want
    filter_backends = [
        FilteringFilterBackend,
        POSTFilteringFilterBackend,
        OrderingFilterBackend,
        DefaultOrderingFilterBackend,
        CompoundSearchFilterBackend,
        FacetedSearchFilterBackendExtended,
    ]

    # Primitive
    lookup_field = "id"

    # Define search fields
    # Is this exhaustive enough?
    search_fields = {
        "title": {"boost": 10},
        "publication_authors": {"boost": 8},  # "People will search themselves"
        "sample_keywords": {"boost": 7},
        "publication_title": {"boost": 5},
        "submitter_institution": {"boost": 3},
        "description": {"boost": 2},
        "accession_code": None,
        "alternate_accession_code": None,
        "publication_doi": None,
        "pubmed_id": None,
        "sample_metadata_fields": None,
        "platform_names": None,
    }

    # Define filtering fields
    filter_fields = {
        "id": {"field": "_id", "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN],},
        "technology": "technology",
        "has_publication": "has_publication",
        "accession_code": "accession_code",
        "alternate_accession_code": "alternate_accession_code",
        "platform": "platform_accession_codes",
        "organism": "organism_names.raw",
        "downloadable_organism": "downloadable_organism_names.raw",
        "num_processed_samples": {
            "field": "num_processed_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
        "num_downloadable_samples": {
            "field": "num_downloadable_samples",
            "lookups": [LOOKUP_FILTER_RANGE, LOOKUP_QUERY_IN, LOOKUP_QUERY_GT],
        },
        "sample_keywords": "sample_keywords",
    }

    # Define ordering fields
    ordering_fields = {
        "id": "id",
        "title": "title.raw",
        "description": "description.raw",
        "num_total_samples": "num_total_samples",
        "num_downloadable_samples": "num_downloadable_samples",
        "source_first_published": "source_first_published",
    }

    # Specify default ordering
    ordering = (
        "_score",
        "-num_total_samples",
        "id",
        "title",
        "description",
        "-source_first_published",
    )

    # Facets (aka Aggregations) provide statistics about the query result set in the API response.
    # More information here: https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/03a3aa716db31868ca3a71340513a993741a4177/src/django_elasticsearch_dsl_drf/filter_backends/faceted_search.py#L24
    faceted_search_fields = {
        "technology": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": True,  # These are enabled by default, which is more expensive but more simple.
        },
        "downloadable_organism_names": {
            "field": "downloadable_organism_names.raw",
            "facet": TermsFacet,
            "enabled": True,
            "options": {"size": 999999},
        },
        "platform_accession_codes": {
            "field": "platform_accession_codes",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
            "options": {"size": 999999},
        },
        "has_publication": {
            "field": "has_publication",
            "facet": TermsFacet,
            "enabled": True,
            "global": False,
        },
        # We don't actually need any "globals" to drive our web frontend,
        # but we'll leave them available but not enabled by default, as they're
        # expensive.
        "technology_global": {
            "field": "technology",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
        "organism_names_global": {
            "field": "organism_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        "platform_names_global": {
            "field": "platform_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
            "options": {"size": 999999},
        },
        "has_publication_global": {
            # NOTE(review): this facet is keyed "has_publication_global" but
            # points at the "platform_names" field — looks like a copy/paste
            # slip; confirm the intended field before relying on it.
            "field": "platform_names",
            "facet": TermsFacet,
            "enabled": False,
            "global": True,
        },
    }
    faceted_search_param = "facet"

    # Define a separate post method so that we can hide it in the
    # documentation. Otherwise, the auto-generated documentation for the post
    # method is incorrect
    @swagger_auto_schema(auto_schema=None)
    def post(self, request, *args, **kwargs):
        """POST mirror of the GET search, for queries too large for a URL."""
        return self.list(request, *args, **kwargs)

    def list(self, request, *args, **kwargs):
        """Run the standard list view, then flatten the ES facets in-place."""
        # Fix: forward *args/**kwargs properly instead of passing the args
        # tuple and kwargs dict as two positional arguments.
        response = super(ExperimentDocumentView, self).list(request, *args, **kwargs)
        response.data["facets"] = self.transform_es_facets(response.data["facets"])
        return response

    def transform_es_facets(self, facets):
        """Transforms Elastic Search facets into a set of objects where each one corresponds
        to a filter group. Example:
        { technology: {rna-seq: 254, microarray: 8846, unknown: 0} }
        Which means the users could attach `?technology=rna-seq` to the url and expect 254
        samples returned in the results.
        """
        result = {}
        # Plain dict iteration; six.iteritems is unnecessary on Python 3.
        for field, facet in facets.items():
            filter_group = {}
            for bucket in facet["buckets"]:
                if field == "has_publication":
                    # Boolean facets key their buckets by "true"/"false" strings.
                    filter_group[bucket["key_as_string"]] = bucket["total_samples"]["value"]
                else:
                    filter_group[bucket["key"]] = bucket["total_samples"]["value"]
            result[field] = filter_group
        return result
<|code_end|>
common/data_refinery_common/models/documents.py
<|code_start|>from django_elasticsearch_dsl import Document, Index, fields
from elasticsearch_dsl import analyzer
from elasticsearch_dsl.analysis import token_filter
from data_refinery_common.models.experiment import Experiment
# The single ES index backing experiment search. The very large
# max_result_window allows deep pagination across every experiment.
experiment_index = Index("experiments")
experiment_index.settings(number_of_shards=1, number_of_replicas=0, max_result_window=9999999)

# via https://django-elasticsearch-dsl-drf.readthedocs.io/en/0.17.2/advanced_usage_examples.html?highlight=ngram#id8
# via https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/issues/110
# Edge n-grams (3-12 chars) enable prefix/partial matching of search terms.
edge_ngram_completion_filter = token_filter(
    "edge_ngram_completion_filter", type="edge_ngram", min_gram=3, max_gram=12
)

# Full-text analyzer: HTML stripped, whitespace-tokenized, edge-n-grammed,
# lowercased, stopwords removed, snowball-stemmed.
html_strip = analyzer(
    "html_strip",
    tokenizer="whitespace",
    filter=[edge_ngram_completion_filter, "standard", "lowercase", "stop", "snowball"],
    char_filter=["html_strip"],
)

# Like html_strip but without edge n-grams or stemming (whole tokens only).
html_strip_no_ngram = analyzer(
    "html_strip_no_ngram",
    tokenizer="standard",
    filter=["standard", "lowercase", "stop"],
    char_filter=["html_strip"],
)

# Keeps stopwords; used for the `technology` field in ExperimentDocument.
html_strip_no_stop = analyzer(
    "html_strip_no_stop",
    tokenizer="whitespace",
    filter=["standard", "lowercase"],
    char_filter=["html_strip"],
)

# Treats the whole value as a single token, for exact-match style fields.
standard_keyword = analyzer("standard_keyword", tokenizer="keyword", filter=[],)
@experiment_index.doc_type
class ExperimentDocument(Document):
    """Our Experiment ElasticSearch Document, which
    corresponds to our Experiment model."""

    # Keyword Fields
    # Each full-text field also exposes a "raw" sub-field for exact matching,
    # sorting and aggregations.
    title = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    publication_title = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    description = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    publication_authors = fields.TextField(
        analyzer=html_strip, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    technology = fields.TextField(
        analyzer=html_strip_no_stop, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    organism_names = fields.TextField(
        analyzer=html_strip_no_ngram, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    downloadable_organism_names = fields.TextField(
        analyzer=html_strip_no_ngram, fielddata=True, fields={"raw": fields.KeywordField()}
    )
    platform_names = fields.TextField(
        analyzer=standard_keyword, fielddata=True, fields={"raw": fields.TextField()}
    )
    platform_accession_codes = fields.TextField(
        analyzer=standard_keyword, fielddata=True, fields={"raw": fields.TextField()}
    )

    # Basic Fields
    accession_code = fields.KeywordField()
    alternate_accession_code = fields.KeywordField()
    submitter_institution = fields.TextField()
    publication_doi = fields.TextField()
    has_publication = fields.BooleanField()
    sample_metadata_fields = fields.TextField()
    pubmed_id = fields.TextField()
    num_total_samples = fields.IntegerField()
    num_processed_samples = fields.IntegerField()
    num_downloadable_samples = fields.IntegerField()
    source_first_published = fields.DateField()

    # Index all downloadable samples as keywords so that we can calculate unique counts on the facets
    downloadable_samples = fields.ListField(fields.KeywordField())
    # Index our sample keywords so that we can use them for better search
    sample_keywords = fields.ListField(fields.KeywordField())

    class Django:
        model = Experiment
        parallel_indexing = True
        # Batch size used when iterating the queryset during (re)indexing.
        queryset_pagination = 3000
        fields = [
            "id",
        ]

    def get_queryset(self):
        """Override default queryset"""
        # Deterministic ordering keeps parallel indexing batches stable.
        return super(ExperimentDocument, self).get_queryset().order_by("id")
<|code_end|>
common/data_refinery_common/models/experiment.py
<|code_start|>from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models import Count
from django.db.models.expressions import Q
from django.utils import timezone
from data_refinery_common.models.managers import ProcessedPublicObjectsManager, PublicObjectsManager
class Experiment(models.Model):
    """An Experiment or Study.

    Caches several aggregate values (sample counts, keywords, platform names)
    that are recomputed by the ``update_*`` methods.
    """

    class Meta:
        db_table = "experiments"
        base_manager_name = "public_objects"

    def __str__(self):
        return "Experiment: " + self.accession_code

    # Managers
    objects = models.Manager()
    public_objects = PublicObjectsManager()
    processed_public_objects = ProcessedPublicObjectsManager()

    # Relations
    samples = models.ManyToManyField("Sample", through="ExperimentSampleAssociation")
    organisms = models.ManyToManyField("Organism", through="ExperimentOrganismAssociation")

    # Identifiers
    accession_code = models.CharField(max_length=64, unique=True)
    alternate_accession_code = models.CharField(max_length=64, unique=True, null=True)

    # Historical Properties
    source_database = models.CharField(max_length=32)  # "ArrayExpress, "SRA", "GEO"
    source_url = models.TextField()

    # Properties
    # I was always under the impression that TextFields were slower
    # than CharFields, however the Postgres documentation disagrees:
    # https://www.postgresql.org/docs/9.0/static/datatype-character.html
    title = models.TextField()
    description = models.TextField()
    protocol_description = models.JSONField(default=dict)
    technology = models.CharField(max_length=256, blank=True)
    submitter_institution = models.CharField(max_length=256, blank=True)
    has_publication = models.BooleanField(default=False)
    publication_title = models.TextField(default="")
    publication_doi = models.CharField(max_length=64, blank=True)
    publication_authors = ArrayField(models.TextField(), default=list)
    pubmed_id = models.CharField(max_length=32, blank=True)
    source_first_published = models.DateTimeField(null=True)
    source_last_modified = models.DateTimeField(null=True)

    # Cached Computed Properties
    num_total_samples = models.IntegerField(default=0)
    num_processed_samples = models.IntegerField(default=0)
    num_downloadable_samples = models.IntegerField(default=0)
    sample_keywords = ArrayField(models.TextField(), default=list)
    sample_metadata_fields = ArrayField(models.TextField(), default=list)
    platform_names = ArrayField(models.TextField(), default=list)
    platform_accession_codes = ArrayField(models.TextField(), default=list)

    # Common Properties
    is_public = models.BooleanField(default=True)
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """On save, update timestamps"""
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        # GEO ("GSE...") and ArrayExpress ("E-GEOD-...") accessions mirror each
        # other, so derive the alternate form when only one is present.
        if self.accession_code and not self.alternate_accession_code:
            if self.accession_code.startswith("GSE"):
                self.alternate_accession_code = "E-GEOD-" + self.accession_code[3:]
            elif self.accession_code.startswith("E-GEOD-"):
                self.alternate_accession_code = "GSE" + self.accession_code[7:]
        return super(Experiment, self).save(*args, **kwargs)

    def update_num_samples(self):
        """Update our cache values"""
        # Single aggregate query instead of three separate counts.
        aggregates = self.samples.aggregate(
            num_total_samples=Count("id"),
            num_processed_samples=Count("id", filter=Q(is_processed=True)),
            num_downloadable_samples=Count(
                "id", filter=Q(is_processed=True, organism__qn_target__isnull=False)
            ),
        )
        self.num_total_samples = aggregates["num_total_samples"]
        self.num_processed_samples = aggregates["num_processed_samples"]
        self.num_downloadable_samples = aggregates["num_downloadable_samples"]
        self.save()

    def to_metadata_dict(self):
        """Render this Experiment as a dict"""
        metadata = {}
        metadata["title"] = self.title
        metadata["accession_code"] = self.accession_code
        metadata["organisms"] = list(self.organisms.all().values_list("name", flat=True))
        metadata["sample_accession_codes"] = list(
            self.samples.all().values_list("accession_code", flat=True)
        )
        metadata["description"] = self.description
        metadata["protocol_description"] = self.protocol_description
        metadata["technology"] = self.technology
        metadata["submitter_institution"] = self.submitter_institution
        metadata["has_publication"] = self.has_publication
        metadata["publication_title"] = self.publication_title
        metadata["publication_doi"] = self.publication_doi
        metadata["pubmed_id"] = self.pubmed_id
        # Dates are serialized as ISO-8601 strings, empty string when unset.
        if self.source_first_published:
            metadata["source_first_published"] = self.source_first_published.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_first_published"] = ""
        if self.source_last_modified:
            metadata["source_last_modified"] = self.source_last_modified.strftime(
                "%Y-%m-%dT%H:%M:%S"
            )
        else:
            metadata["source_last_modified"] = ""
        return metadata

    def get_sample_keywords(self):
        """Get the human-readable name of all of the keywords that are defined
        on at least one sample
        """
        keywords = set()
        for sample in self.samples.all():
            keywords |= set(sample.keywords.values_list("name__human_readable_name", flat=True))
        return list(keywords)

    def get_sample_metadata_fields(self):
        """Get all metadata fields that are non-empty for at least one sample in the experiment.
        See https://github.com/AlexsLemonade/refinebio-frontend/issues/211 for why this is needed.
        """
        fields = []
        possible_fields = [
            "sex",
            "age",
            "specimen_part",
            "genotype",
            "disease",
            "disease_stage",
            "cell_line",
            "treatment",
            "race",
            "subject",
            "compound",
            "time",
        ]
        samples = self.samples.all()
        for field in possible_fields:
            for sample in samples:
                # Single getattr and an identity check instead of `!= None`.
                value = getattr(sample, field)
                if value is not None and value != "":
                    fields.append(field)
                    break
        return fields

    def update_sample_keywords(self):
        self.sample_keywords = self.get_sample_keywords()

    def update_sample_metadata_fields(self):
        self.sample_metadata_fields = self.get_sample_metadata_fields()

    def update_platform_names(self):
        self.platform_names = self.get_platform_names()
        self.platform_accession_codes = self.get_platform_accession_codes()

    def get_sample_technologies(self):
        """Get a list of unique technologies for all of the associated samples"""
        return list({sample.technology for sample in self.samples.all()})

    def get_platform_names(self):
        """Get a list of unique platforms for all of the associated samples"""
        return list({sample.platform_name for sample in self.samples.all()})

    def get_platform_accession_codes(self):
        """Get a list of unique platforms for all of the associated samples"""
        return list({sample.platform_accession_code for sample in self.samples.all()})

    @property
    def platforms(self):
        """Returns a list of related pipelines"""
        return list({sample.platform_name for sample in self.samples.all()})

    @property
    def pretty_platforms(self):
        """Returns a prettified list of related pipelines"""
        return list({sample.pretty_platform for sample in self.samples.all()})

    @property
    def processed_samples(self):
        """Accession codes of all processed samples in this experiment."""
        # Truthiness check instead of `== True`; no redundant list([...]).
        return [sample.accession_code for sample in self.samples.all() if sample.is_processed]

    @property
    def downloadable_organism_names(self):
        """Get a list of unique organism names that has at least one downloadable sample"""
        result = (
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False)
            .values_list("organism__name", flat=True)
            .distinct()
        )
        return list(result)

    @property
    def organism_names(self):
        """Get a list of unique organism names"""
        result = self.samples.values_list("organism__name", flat=True).distinct()
        return list(result)

    @property
    def downloadable_samples(self):
        """
        Returns the accession codes of the downloadable samples in this experiment.
        This is indexed on elastic search and used to count the number of samples
        on the filters.
        """
        return list(
            self.samples.filter(is_processed=True, organism__qn_target__isnull=False).values_list(
                "accession_code", flat=True
            )
        )
<|code_end|>
|
Initialize automatic_survey.py command
### Context
In the foreman, as a sibling next to other accession surveying commands (i.e. feed_the_beast), we should add a new command called `automatic_survey.py`. This command will be run via a cron job and should be executed daily.
This management command should:
- Determine how many samples per technology we have gathered so far for the week (what's available).
- Determine how close we are to the per-week processing target (how many experiments/samples we have processed so far this week).
- Check whether the currently queued jobs are at the threshold (what hasn't finished yet).
- Determine how many failures we have seen this week.
- Take accessions from `GatheredAccession.objects.filter` as long as we are under target, call
`run_surveyor_for_accession(accession)`, and remove each accession from the backlog after successfully creating its survey job.
| foreman/data_refinery_foreman/foreman/management/commands/automatic_survey.py
<|code_start|><|code_end|>
foreman/data_refinery_foreman/gatherer/agents/base.py
<|code_start|>"""Abstract base class for accession gathering automation agents."""
from abc import ABC, abstractmethod
from datetime import datetime
from http.client import RemoteDisconnected
from requests.exceptions import ConnectionError, ConnectTimeout
from urllib3.exceptions import ProtocolError
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models.experiment import Experiment
from data_refinery_common.models.gathered_accession import GatheredAccession
logger = get_and_configure_logger(__name__)
class AccessionAgentBase(ABC):
    """Accession agent abstract base class.

    Subclasses implement the query-building, fetching and collection steps for
    one accession data source.
    """

    # NOTE: class-level set, shared by all agent subclasses, so previously seen
    # accessions are excluded across agents within a single process.
    previous_accessions = set()
    # Retry policy for transient network failures.
    # NOTE(review): the retrying library's wait_exponential_* values are in
    # milliseconds, so the "Seconds" labels below look wrong — confirm.
    retry_params = {
        "retry_on_exception": lambda e: isinstance(
            e, (ConnectionError, ConnectTimeout, ProtocolError, RemoteDisconnected)
        ),
        "stop_max_attempt_number": 5,
        "wait_exponential_multiplier": 1000,  # Seconds.
        "wait_exponential_max": 16000,  # Seconds.
    }

    def __init__(self, options) -> None:
        """Populates args and values for major variables."""
        self.options = options
        self.count = options["count"]
        self.keyword = options["keyword"]
        self.organism = options["organism"]
        self.since = options["since"]
        # Default the upper bound of the date range to today.
        self.until = options["until"] or datetime.now().strftime("%Y-%m-%d")
        self.ids = self.get_ids()
        self.populate_previous_accessions()

    @abstractmethod
    def build_query(self):
        """Returns query/query dict depending on the accession data source."""
        pass

    @abstractmethod
    def collect_data(self):
        """Generates resulting entry collection."""
        pass

    @abstractmethod
    def fetch_data(self):
        """Fetches data from an external or local data source."""
        pass

    @abstractmethod
    def get_ids(self):
        """Gets IDs for query filtering depending on the accession technology."""
        pass

    def populate_previous_accessions(self) -> None:
        """Populates previous accession set from a provided excluded ids file."""
        # Only populate once, and only when exclusion was requested.
        if not self.options["exclude_previous"] or self.previous_accessions:
            return
        # Gathered accessions.
        self.previous_accessions.update(
            (
                entry["accession_code"]
                for entry in GatheredAccession.objects.values("accession_code")
            )
        )
        # Surveyed accessions.
        experiments = Experiment.objects.values("accession_code", "alternate_accession_code")
        self.previous_accessions.update(
            (experiment["accession_code"] for experiment in experiments)
        )
        self.previous_accessions.update(
            (experiment["alternate_accession_code"] for experiment in experiments)
        )
<|code_end|>
| foreman/data_refinery_foreman/foreman/management/commands/automatic_survey.py
<|code_start|>"""
Automatic survey command.
"""
from django.core.management.base import BaseCommand
from django.template.defaultfilters import pluralize
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import GatheredAccession, SurveyedAccession, SurveyJob
from data_refinery_foreman.surveyor.management.commands.surveyor_dispatcher import (
queue_surveyor_for_accession,
)
logger = get_and_configure_logger(__name__)
class Command(BaseCommand):
    """Queues survey jobs for gathered accessions, bounded by a per-time-window quota."""

    def add_arguments(self, parser):
        parser.add_argument("-d", "--days", help="The time window in days.", type=int, default=1)
        parser.add_argument(
            "-q",
            "--quota",
            help="The number of accessions to survey per the time window.",
            type=int,
            default=10,
        )

    def handle(self, *args, **options):
        create_survey_jobs(options["days"], options["quota"])
def create_survey_jobs(days, quota):
    """Automatically creates survey jobs based on the passed time window requirements.

    Queues surveyor jobs for gathered accessions that have not been surveyed
    yet, oldest first, stopping once the quota remaining for the time window is
    exhausted. Successfully queued accessions are recorded as SurveyedAccession
    rows so they will not be queued again.
    """
    remaining_quota = get_remaining_quota(days, quota)
    if not remaining_quota:
        logger.info("Unable to create new survey jobs: remaining quota is 0.")
        return None
    gathered_accessions = GatheredAccession.objects.order_by("created_at").values_list(
        "accession_code", flat=True
    )
    BATCH_SIZE = 100
    queued_accessions = []
    for batch_start in range(0, len(gathered_accessions), BATCH_SIZE):
        # Quota was reached inside the previous batch.
        if len(queued_accessions) == remaining_quota:
            logger.info("Unable to create new survey jobs: remaining quota is 0.")
            break
        batch = gathered_accessions[batch_start : batch_start + BATCH_SIZE]
        if not batch:
            break
        # Drop accessions from the batch that were already surveyed.
        surveyed_accessions = SurveyedAccession.objects.filter(
            accession_code__in=batch
        ).values_list("accession_code", flat=True)
        for accession_code in set(batch).difference(set(surveyed_accessions)):
            try:
                queue_surveyor_for_accession(accession_code)
            except Exception as e:
                logger.error(f"Couldn't queue accession {accession_code} due to: {e}")
            else:
                queued_accessions.append(accession_code)
            if len(queued_accessions) == remaining_quota:
                break
    try:
        # Record every successfully queued accession so it is not re-surveyed.
        SurveyedAccession.objects.bulk_create(
            (
                SurveyedAccession(accession_code=accession_code)
                for accession_code in queued_accessions
            )
        )
        queued_accession_count = len(queued_accessions)
        logger.info(
            f"Queued {queued_accession_count} accession{pluralize(queued_accession_count)} "
            f"based on {quota} accession{pluralize(quota)} per {days} "
            f"day{pluralize(days)} quota."
        )
    except Exception as e:
        logger.error("Couldn't add surveyed accessions due to: %s" % e)
def get_remaining_quota(days, quota):
    """Return how many survey jobs may still be created in the window, never below 0."""
    window_start = timezone.now() - relativedelta(days=days)
    jobs_in_window = SurveyJob.objects.filter(created_at__gt=window_start).count()
    remaining = quota - jobs_in_window
    return remaining if remaining > 0 else 0
<|code_end|>
foreman/data_refinery_foreman/gatherer/agents/base.py
<|code_start|>"""Abstract base class for accession gathering automation agents."""
from abc import ABC, abstractmethod
from datetime import datetime
from http.client import RemoteDisconnected
from requests.exceptions import ConnectionError, ConnectTimeout
from urllib3.exceptions import ProtocolError
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import GatheredAccession, SurveyedAccession
logger = get_and_configure_logger(__name__)
class AccessionAgentBase(ABC):
    """Accession agent abstract base class.

    Subclasses implement the query-building, fetching and collection steps for
    one accession data source.
    """

    # NOTE: class-level set, shared by all agent subclasses, so previously seen
    # accessions are excluded across agents within a single process.
    previous_accessions = set()
    # Retry policy for transient network failures.
    # NOTE(review): the retrying library's wait_exponential_* values are in
    # milliseconds, so the "Seconds" labels below look wrong — confirm.
    retry_params = {
        "retry_on_exception": lambda e: isinstance(
            e, (ConnectionError, ConnectTimeout, ProtocolError, RemoteDisconnected)
        ),
        "stop_max_attempt_number": 5,
        "wait_exponential_multiplier": 1000,  # Seconds.
        "wait_exponential_max": 16000,  # Seconds.
    }

    def __init__(self, options) -> None:
        """Populates args and values for major variables."""
        self.options = options
        self.count = options["count"]
        self.keyword = options["keyword"]
        self.organism = options["organism"]
        self.since = options["since"]
        # Default the upper bound of the date range to today.
        self.until = options["until"] or datetime.now().strftime("%Y-%m-%d")
        self.ids = self.get_ids()
        self.populate_previous_accessions()

    @abstractmethod
    def build_query(self):
        """Returns query/query dict depending on the accession data source."""
        pass

    @abstractmethod
    def collect_data(self):
        """Generates resulting entry collection."""
        pass

    @abstractmethod
    def fetch_data(self):
        """Fetches data from an external or local data source."""
        pass

    @abstractmethod
    def get_ids(self):
        """Gets IDs for query filtering depending on the accession technology."""
        pass

    def populate_previous_accessions(self) -> None:
        """Populates previous accession set from gathered/surveyed accessions."""
        # Only populate once, and only when exclusion was requested.
        if not self.options["exclude_previous"] or self.previous_accessions:
            return
        # Gathered accessions.
        self.previous_accessions.update(
            (
                entry["accession_code"]
                for entry in GatheredAccession.objects.values("accession_code")
            )
        )
        # Surveyed accessions.
        self.previous_accessions.update(
            (sa["accession_code"] for sa in SurveyedAccession.objects.values("accession_code"))
        )
<|code_end|>
|
Add indices to new models for automatic accession gathering
### Context
We want to start adding indices to new models that we create.
### Problem or idea
The new `GatheredAccession` model is frequently queried by `accession_code` and ordered by `created_at`, but it currently has no database indexes for those columns, so these lookups scan the whole table.
### Solution or next step
Add `Meta.indexes` entries for `accession_code` and `created_at` on `GatheredAccession` and generate the corresponding migration.
| common/data_refinery_common/migrations/0072_auto_20220927_2235.py
<|code_start|><|code_end|>
common/data_refinery_common/models/gathered_accession.py
<|code_start|>from datetime import datetime
from django.db import models
from django.utils import timezone
class GatheredAccession(models.Model):
    """Gathered accession model.

    A candidate accession discovered by a gathering agent, queued for later
    surveying. Equality and hashing are based on the accession code alone.
    """

    class Meta:
        db_table = "gathered_accessions"

    accession_code = models.TextField(unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    last_modified_at = models.DateTimeField(auto_now=True)
    organism = models.TextField()
    published_date = models.DateTimeField()
    sample_count = models.PositiveIntegerField(default=0)
    source = models.TextField()
    technology = models.TextField()

    def __eq__(self, other: object) -> bool:
        """Returns True if two objects are equal. Otherwise returns False."""
        return isinstance(other, GatheredAccession) and self.accession_code == other.accession_code

    def __hash__(self) -> int:
        """Returns accession object unique hash value."""
        return hash(self.accession_code)

    def __str__(self) -> str:
        """Returns accession default string representation."""
        return ", ".join(
            (
                self.accession_code,
                self.technology,
                self.source,
                str(self.published_date.date()),
            )
        )

    @staticmethod
    def create_from_external_entry(data, source, technology, organism=None):
        """Creates an accession object from an external entry.

        Handles ArrayExpress ("accession"), GEO ("gse") and SRA
        ("secondary_study_accession") key layouts, not just ArrayExpress.
        The returned instance is not saved.
        """
        accession = GatheredAccession()
        accession.accession_code = (
            data.get("accession") or data.get("gse") or data.get("secondary_study_accession")
        )
        organism = data.get("organism") or data.get("scientific_name") or organism
        if organism:
            accession.organism = organism.lower()
        published_date = (
            data.get("first_public") or data.get("release_date") or data.get("submission_date")
        )
        accession.published_date = timezone.make_aware(
            datetime.strptime(published_date, "%Y-%m-%d")
        )
        accession.source = source
        accession.technology = technology
        return accession
<|code_end|>
| common/data_refinery_common/migrations/0072_auto_20220927_2235.py
<|code_start|># Generated by Django 3.2.7 on 2022-09-27 22:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds lookup indexes on GatheredAccession's accession_code and created_at."""

    dependencies = [
        ("data_refinery_common", "0071_gatheredaccession"),
    ]
    operations = [
        migrations.AddIndex(
            model_name="gatheredaccession",
            index=models.Index(fields=["accession_code"], name="gathered_ac_accessi_5dde9a_idx"),
        ),
        migrations.AddIndex(
            model_name="gatheredaccession",
            index=models.Index(fields=["created_at"], name="gathered_ac_created_2efd0f_idx"),
        ),
    ]
<|code_end|>
common/data_refinery_common/models/gathered_accession.py
<|code_start|>from datetime import datetime
from django.db import models
from django.utils import timezone
class GatheredAccession(models.Model):
    """Gathered accession model.

    A candidate accession discovered by a gathering agent, queued for later
    surveying. Equality and hashing are based on the accession code alone.
    """

    class Meta:
        db_table = "gathered_accessions"
        # Speed up lookups by accession code and oldest-first ordering.
        indexes = (
            models.Index(fields=("accession_code",)),
            models.Index(fields=("created_at",)),
        )

    accession_code = models.TextField(unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    last_modified_at = models.DateTimeField(auto_now=True)
    organism = models.TextField()
    published_date = models.DateTimeField()
    sample_count = models.PositiveIntegerField(default=0)
    source = models.TextField()
    technology = models.TextField()

    def __eq__(self, other: object) -> bool:
        """Returns True if two objects are equal. Otherwise returns False."""
        return isinstance(other, GatheredAccession) and self.accession_code == other.accession_code

    def __hash__(self) -> int:
        """Returns accession object unique hash value."""
        return hash(self.accession_code)

    def __str__(self) -> str:
        """Returns accession default string representation."""
        return ", ".join(
            (
                self.accession_code,
                self.technology,
                self.source,
                str(self.published_date.date()),
            )
        )

    @staticmethod
    def create_from_external_entry(data, source, technology, organism=None):
        """Creates an accession object from an external entry.

        Handles ArrayExpress ("accession"), GEO ("gse") and SRA
        ("secondary_study_accession") key layouts, not just ArrayExpress.
        The returned instance is not saved.
        """
        accession = GatheredAccession()
        accession.accession_code = (
            data.get("accession") or data.get("gse") or data.get("secondary_study_accession")
        )
        organism = data.get("organism") or data.get("scientific_name") or organism
        if organism:
            accession.organism = organism.lower()
        published_date = (
            data.get("first_public") or data.get("release_date") or data.get("submission_date")
        )
        accession.published_date = timezone.make_aware(
            datetime.strptime(published_date, "%Y-%m-%d")
        )
        accession.source = source
        accession.technology = technology
        return accession
<|code_end|>
|
Duplicate Sample Accessions Breaks Dataset State
### Context
@jashapiro was adding some samples to a dataset recently when he noticed on the client side he was no longer able to modify his dataset. After some investigation I noticed that the issue occurred when samples with the same accessions codes are present in multiple experiments.
```python
josh = Dataset.objects.get(id='5f3dd9da-e1fd-4ee6-8c07-bd97a83a0dd1')
for key, value in josh.data.items():
accessions.extend(value)
len(accessions)
# 1715
samples = Sample.public_objects.filter(accession_code__in=accessions)
samples.count()
# 1709
accession_set = Set(accessions)
len(accession_set)
# 1709
```
```python
for key, value in josh.data.items():
    items = set(value) & set(duplicates)
if len(items) > 0:
data[key] = items
print(data)
# {
# "GSE98279": {
# "GSM2590437",
# "GSM2590436",
# "GSM2590433",
# "GSM2590434",
# "GSM2590432",
# "GSM2590435",
# },
# "GSE98384": {
# "GSM2590437",
# "GSM2590436",
# "GSM2590433",
# "GSM2590434",
# "GSM2590432",
# "GSM2590435",
# },
# }
```
### Problem or idea
We should update this [line](https://github.com/AlexsLemonade/refinebio/blob/dev/api/data_refinery_api/views/dataset.py#L118) to either coerce the `accessions` list to a set or update the querset that defines `samples` to allow for duplicates.
We should also determine where in the logic that allows users to initially add the samples without failing this validation since the code should run before save. It only seems to be an issue after the duplicate sample accession codes are saved on the dataset instance and then another update is attempted.
### Solution or next step
We should update the code that checks if the dataset is in a valid state.
| api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains DatasetView
##
from collections import defaultdict
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from rest_framework import mixins, serializers, viewsets
from rest_framework.exceptions import APIException
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_api.exceptions import BadRequest, InvalidData
from data_refinery_common.enums import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
Dataset,
DatasetAnnotation,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
)
logger = get_and_configure_logger(__name__)
def get_client_ip(request):
    """Return the originating client IP address for *request*.

    Prefers the first (leftmost) entry of the ``X-Forwarded-For`` header,
    which proxies/load balancers set to the original client address; falls
    back to ``REMOTE_ADDR``, or ``""`` when neither is present.
    """
    forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
    if forwarded_for:
        # Leftmost entry is the client; later entries are intermediate proxies.
        return forwarded_for.split(",")[0]
    return request.META.get("REMOTE_ADDR", "")
def experiment_has_downloadable_samples(experiment, quant_sf_only=False):
    """Return True when the experiment (given by accession code) has at least
    one downloadable sample.

    When ``quant_sf_only`` is set, "downloadable" means the experiment has at
    least one sample with an uploaded quant.sf file; otherwise the experiment
    merely has to appear in the processed-public manager.
    """
    if quant_sf_only:
        try:
            # NOTE: rebinds `experiment` from an accession-code string to an
            # Experiment instance for the sample_set query below.
            experiment = Experiment.public_objects.get(accession_code=experiment)
        except Experiment.DoesNotExist:
            return False
        samples = experiment.sample_set.filter(
            # We only want samples with a quant.sf file associated with them
            results__computedfile__filename="quant.sf",
            results__computedfile__s3_key__isnull=False,
            results__computedfile__s3_bucket__isnull=False,
        )
        if samples.count() == 0:
            return False
    else:
        try:
            # Existence check only; the fetched object is discarded.
            Experiment.processed_public_objects.get(accession_code=experiment)
        except Experiment.DoesNotExist:
            return False
    return True
def validate_dataset(data):
    """
    Dataset validation. Each experiment should always have at least one
    sample, all samples should be downloadable, and when starting the smasher
    there should be at least one experiment.

    Raises InvalidData on the first problem found; returns None on success.
    `data` is expected to be a dict with a "data" key mapping experiment
    accession codes to lists of sample accession codes (or ["ALL"]).
    """
    if data.get("data") is None or type(data["data"]) != dict:
        raise InvalidData("`data` must be a dict of lists.")
    if data.get("start") and len(data["data"]) == 0:
        raise InvalidData("`data` must contain at least one experiment.")
    accessions = []
    non_downloadable_experiments = []
    for key, value in data["data"].items():
        if type(value) != list:
            raise InvalidData("`data` must be a dict of lists. Problem with `" + str(key) + "`")
        if len(value) < 1:
            raise InvalidData(
                "`data` must be a dict of lists, each with one or more elements. Problem with `"
                + str(key)
                + "`"
            )
        # Guards against duplicates WITHIN one experiment's list only.
        if len(value) != len(set(value)):
            raise InvalidData("Duplicate values detected in " + str(value))
        # If they want "ALL", just make sure that the experiment has at least one downloadable sample
        if value == ["ALL"]:
            if not experiment_has_downloadable_samples(
                key, quant_sf_only=data.get("quant_sf_only", False)
            ):
                non_downloadable_experiments.append(key)
        # Otherwise, we will check that all the samples they requested are downloadable
        else:
            accessions.extend(value)
    if len(non_downloadable_experiments) != 0:
        raise InvalidData(
            message="Experiment(s) in dataset have zero downloadable samples. See `details` for a full list",
            details=non_downloadable_experiments,
        )
    if len(accessions) == 0:
        return
    samples = Sample.public_objects.filter(accession_code__in=accessions)
    # NOTE(review): `accessions` can contain the same sample accession under
    # two different experiments; the queryset deduplicates but the list does
    # not, so this comparison can fail for a valid dataset. Consider comparing
    # against len(set(accessions)) — TODO confirm intended behavior.
    if samples.count() != len(accessions):
        raise InvalidData(
            message="Sample(s) in dataset do not exist on refine.bio. See `details` for a full list",
            details=list(set(accessions) - set(s.accession_code for s in samples)),
        )
    if data.get("quant_sf_only", False):
        samples_without_quant_sf = samples.exclude(
            # Exclude samples that have at least one uploaded quant.sf file associated with them
            results__computedfile__filename="quant.sf",
            results__computedfile__s3_key__isnull=False,
            results__computedfile__s3_bucket__isnull=False,
        )
        if samples_without_quant_sf.count() > 0:
            raise InvalidData(
                message="Sample(s) in dataset are missing quant.sf files. See `details` for a full list",
                details=[s.accession_code for s in samples_without_quant_sf],
            )
    else:
        unprocessed_samples = samples.exclude(is_processed=True)
        if unprocessed_samples.count() > 0:
            raise InvalidData(
                message="Non-downloadable sample(s) in dataset. See `details` for a full list",
                details=[s.accession_code for s in unprocessed_samples],
            )
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
"""This serializer contains all of the information about an experiment needed for the download
page
"""
sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")
class Meta:
model = Experiment
fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
start = serializers.NullBooleanField(required=False)
experiments = DatasetDetailsExperimentSerializer(
source="get_experiments", many=True, read_only=True
)
organism_samples = serializers.SerializerMethodField(read_only=True)
worker_version = serializers.SerializerMethodField(read_only=True)
def __init__(self, *args, **kwargs):
super(DatasetSerializer, self).__init__(*args, **kwargs)
if "context" in kwargs:
if "request" in kwargs["context"]:
# only include the fields `experiments` and `organism_samples` when the param `?details=true`
# is provided. This is used on the frontend to render the downloads page
# thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
if "details" not in kwargs["context"]["request"].query_params:
self.fields.pop("experiments")
self.fields.pop("organism_samples")
self.fields.pop("worker_version")
# only include the field `download_url` if a valid token is specified
# the token lookup happens in the view.
if "token" not in kwargs["context"]:
self.fields.pop("download_url")
def create(self, validated_data):
# "start" isn't actually a field on the Dataset model, we just use it
# on the frontend to control when the dataset gets dispatched
if "start" in validated_data:
validated_data.pop("start")
return super(DatasetSerializer, self).create(validated_data)
class Meta:
model = Dataset
fields = (
"id",
"data",
"aggregate_by",
"scale_by",
"is_processing",
"is_processed",
"is_available",
"has_email",
"email_address",
"email_ccdl_ok",
"notify_me",
"expires_on",
"s3_bucket",
"s3_key",
"success",
"failure_reason",
"created_at",
"last_modified",
"start",
"size_in_bytes",
"sha1",
"experiments",
"organism_samples",
"download_url",
"quantile_normalize",
"quant_sf_only",
"svd_algorithm",
"worker_version",
)
extra_kwargs = {
"data": {
"required": True,
},
"id": {
"read_only": True,
},
"is_processing": {
"read_only": True,
},
"is_processed": {
"read_only": True,
},
"is_available": {
"read_only": True,
},
"email_address": {"required": False, "write_only": True},
"email_ccdl_ok": {"required": False, "write_only": True},
"notify_me": {"required": False, "write_only": True},
"expires_on": {
"read_only": True,
},
"s3_bucket": {
"read_only": True,
},
"s3_key": {
"read_only": True,
},
"success": {
"read_only": True,
},
"failure_reason": {
"read_only": True,
},
"created_at": {
"read_only": True,
},
"last_modified": {
"read_only": True,
},
"size_in_bytes": {
"read_only": True,
},
"sha1": {
"read_only": True,
},
"download_url": {
"read_only": True,
},
"worker_version": {
"read_only": True,
"help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
},
}
def validate(self, data):
"""
Ensure this is something we want in our dataset.
"""
validate_dataset(data)
return data
def get_organism_samples(self, obj):
"""
Groups the sample accession codes inside a dataset by their organisms, eg:
{ HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
Useful to avoid sending sample information on the downloads page
"""
samples = (
obj.get_samples()
.prefetch_related("organism")
.values("organism__name", "accession_code")
.order_by("organism__name", "accession_code")
)
result = defaultdict(list)
for sample in samples:
result[sample["organism__name"]].append(sample["accession_code"])
return result
def get_worker_version(self, obj):
processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
"worker_version", flat=True
)
if processor_jobs:
return processor_jobs[0]
else:
return None
@method_decorator(
name="retrieve",
decorator=swagger_auto_schema(
operation_description="View a single Dataset.",
manual_parameters=[
openapi.Parameter(
name="details",
in_=openapi.IN_QUERY,
type=openapi.TYPE_BOOLEAN,
description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
)
],
),
)
@method_decorator(
name="update",
decorator=swagger_auto_schema(
operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
'data': data,
'aggregate_by': 'EXPERIMENT',
'start': True,
'email_address': 'refinebio@gmail.com'
})
headers = {
'Content-Type': 'application/json',
'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
),
)
class DatasetView(
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet,
):
"""View and modify a single Dataset."""
queryset = Dataset.objects.all()
serializer_class = DatasetSerializer
lookup_field = "id"
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
serializer_context = super(DatasetView, self).get_serializer_context()
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
token = APIToken.objects.get(id=token_id, is_activated=True)
return {**serializer_context, "token": token}
except (APIToken.DoesNotExist, ValidationError):
return serializer_context
def validate_token(self):
# Make sure we have a valid activated token.
token_id = self.request.data.get("token_id", None)
if not token_id:
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
APIToken.objects.get(id=token_id, is_activated=True)
except (APIToken.DoesNotExist, ValidationError):
raise BadRequest(
message="You must provide an active API token ID", error_type="invalid_token"
)
@staticmethod
def convert_ALL_to_accessions(data):
qn_organisms = Organism.get_objects_with_qn_targets()
for key in data["data"].keys():
accessions = data["data"][key]
if accessions == ["ALL"]:
experiment = get_object_or_404(Experiment, accession_code=key)
sample_codes = list(
experiment.samples.filter(
is_processed=True, organism__in=qn_organisms
).values_list("accession_code", flat=True)
)
data["data"][key] = sample_codes
def validate_email_address_is_nonempty(self):
"""Check to make sure the email exists. We call this when getting ready to dispatch a dataset"""
supplied_email_address = self.request.data.get("email_address", None)
if supplied_email_address is None or supplied_email_address == "":
raise BadRequest(
message="You must provide an email address.", error_type="invalid_email"
)
def dispatch_job(self, serializer, obj):
processor_job = ProcessorJob()
processor_job.pipeline_applied = "SMASHER"
processor_job.ram_amount = 4096
processor_job.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = processor_job
pjda.dataset = obj
pjda.save()
job_sent = False
try:
# Hidden method of non-dispatching for testing purposes.
if not self.request.data.get("no_send_job", False):
job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
else:
# We didn't actually send it, but we also didn't want to.
job_sent = True
except Exception as e:
# Just log whatever exception happens, because the foreman wil requeue the job anyway
logger.error(e)
if not job_sent:
raise APIException(
"Unable to queue download job. Something has gone"
" wrong and we have been notified about it."
)
serializer.validated_data["is_processing"] = True
obj = serializer.save()
# create a new dataset annotation with the information of this request
annotation = DatasetAnnotation()
annotation.dataset = obj
annotation.data = {
"start": True,
"ip": get_client_ip(self.request),
"user_agent": self.request.META.get("HTTP_USER_AGENT", None),
}
annotation.save()
def create_or_update(self, serializer):
"""If `start` is set, fire off the job. Otherwise just create/update the dataset"""
data = serializer.validated_data
DatasetView.convert_ALL_to_accessions(data)
if data.get("start"):
self.validate_token()
self.validate_email_address_is_nonempty()
obj = serializer.save()
self.dispatch_job(serializer, obj)
else:
serializer.save()
def perform_create(self, serializer):
# Since we are creating a new dataset, there is no way it is already processed
self.create_or_update(serializer)
def perform_update(self, serializer):
# Check to make sure we have not already processed the dataset
old_object = self.get_object()
if old_object.is_processed:
raise BadRequest(
message="You may not update Datasets which have already been processed"
)
# Don't allow critical data updates to jobs that have already been submitted,
# but do allow email address updating.
elif old_object.is_processing:
self.validate_email_address_is_nonempty()
serializer.validated_data["data"] = old_object.data
serializer.validated_data["aggregate_by"] = old_object.aggregate_by
serializer.save()
else:
self.create_or_update(serializer)
<|code_end|>
| api/data_refinery_api/views/dataset.py
<|code_start|>##
# Contains DatasetView
##
from collections import defaultdict
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from rest_framework import mixins, serializers, viewsets
from rest_framework.exceptions import APIException
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_api.exceptions import BadRequest, InvalidData
from data_refinery_common.enums import ProcessorPipeline
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
Dataset,
DatasetAnnotation,
Experiment,
Organism,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
)
logger = get_and_configure_logger(__name__)
def get_client_ip(request):
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forwarded_for:
ip = x_forwarded_for.split(",")[0]
else:
ip = request.META.get("REMOTE_ADDR", "")
return ip
def experiment_has_downloadable_samples(experiment, quant_sf_only=False):
if quant_sf_only:
try:
experiment = Experiment.public_objects.get(accession_code=experiment)
except Experiment.DoesNotExist:
return False
samples = experiment.sample_set.filter(
# We only want samples with a quant.sf file associated with them
results__computedfile__filename="quant.sf",
results__computedfile__s3_key__isnull=False,
results__computedfile__s3_bucket__isnull=False,
)
if not samples.exists():
return False
else:
try:
Experiment.processed_public_objects.get(accession_code=experiment)
except Experiment.DoesNotExist:
return False
return True
def validate_dataset(data):
"""
Dataset validation. Each experiment should always have at least one
sample, all samples should be downloadable, and when starting the smasher
there should be at least one experiment.
"""
if data.get("data") is None or type(data["data"]) != dict:
raise InvalidData("`data` must be a dict of lists.")
if data.get("start") and not data["data"]:
raise InvalidData("`data` must contain at least one experiment.")
accessions = set()
non_downloadable_experiments = set()
for key, value in data["data"].items():
if type(value) != list:
raise InvalidData(f"`data` must be a dict of lists. Problem with `{key}`")
if not value:
raise InvalidData(
"`data` must be a dict of lists, each with one or more elements. "
f"Problem with `{key}`"
)
if len(value) != len(set(value)):
raise InvalidData(f"Duplicate values detected in {value}")
# If they want "ALL", just make sure that the experiment has at least one downloadable sample
if value == ["ALL"]:
if not experiment_has_downloadable_samples(
key, quant_sf_only=data.get("quant_sf_only", False)
):
non_downloadable_experiments.add(key)
# Otherwise, we will check that all the samples they requested are downloadable
else:
accessions.update(value)
if non_downloadable_experiments:
raise InvalidData(
message="Experiment(s) in dataset have zero downloadable samples. See `details` for a full list",
details=non_downloadable_experiments,
)
if not accessions:
return
samples = Sample.public_objects.filter(accession_code__in=accessions)
if samples.count() != len(accessions):
raise InvalidData(
message="Sample(s) in dataset do not exist on refine.bio. See `details` for a full list",
details=list(accessions - set(s.accession_code for s in samples)),
)
if data.get("quant_sf_only"):
samples_without_quant_sf = samples.exclude(
# Exclude samples that have at least one uploaded quant.sf file associated with them
results__computedfile__filename="quant.sf",
results__computedfile__s3_key__isnull=False,
results__computedfile__s3_bucket__isnull=False,
)
if samples_without_quant_sf.exists():
raise InvalidData(
message="Sample(s) in dataset are missing quant.sf files. See `details` for a full list",
details=[s.accession_code for s in samples_without_quant_sf],
)
else:
unprocessed_samples = samples.exclude(is_processed=True)
if unprocessed_samples.exists():
raise InvalidData(
message="Non-downloadable sample(s) in dataset. See `details` for a full list",
details=[s.accession_code for s in unprocessed_samples],
)
class DatasetDetailsExperimentSerializer(serializers.ModelSerializer):
"""This serializer contains all of the information about an experiment needed for the download
page
"""
sample_metadata = serializers.ReadOnlyField(source="sample_metadata_fields")
class Meta:
model = Experiment
fields = ("title", "accession_code", "organism_names", "sample_metadata", "technology")
class DatasetSerializer(serializers.ModelSerializer):
start = serializers.NullBooleanField(required=False)
experiments = DatasetDetailsExperimentSerializer(
source="get_experiments", many=True, read_only=True
)
organism_samples = serializers.SerializerMethodField(read_only=True)
worker_version = serializers.SerializerMethodField(read_only=True)
def __init__(self, *args, **kwargs):
super(DatasetSerializer, self).__init__(*args, **kwargs)
if "context" in kwargs:
if "request" in kwargs["context"]:
# only include the fields `experiments` and `organism_samples` when the param `?details=true`
# is provided. This is used on the frontend to render the downloads page
# thanks to https://django.cowhite.com/blog/dynamically-includeexclude-fields-to-django-rest-framwork-serializers-based-on-user-requests/
if "details" not in kwargs["context"]["request"].query_params:
self.fields.pop("experiments")
self.fields.pop("organism_samples")
self.fields.pop("worker_version")
# only include the field `download_url` if a valid token is specified
# the token lookup happens in the view.
if "token" not in kwargs["context"]:
self.fields.pop("download_url")
def create(self, validated_data):
# "start" isn't actually a field on the Dataset model, we just use it
# on the frontend to control when the dataset gets dispatched
if "start" in validated_data:
validated_data.pop("start")
return super(DatasetSerializer, self).create(validated_data)
class Meta:
model = Dataset
fields = (
"id",
"data",
"aggregate_by",
"scale_by",
"is_processing",
"is_processed",
"is_available",
"has_email",
"email_address",
"email_ccdl_ok",
"notify_me",
"expires_on",
"s3_bucket",
"s3_key",
"success",
"failure_reason",
"created_at",
"last_modified",
"start",
"size_in_bytes",
"sha1",
"experiments",
"organism_samples",
"download_url",
"quantile_normalize",
"quant_sf_only",
"svd_algorithm",
"worker_version",
)
extra_kwargs = {
"data": {
"required": True,
},
"id": {
"read_only": True,
},
"is_processing": {
"read_only": True,
},
"is_processed": {
"read_only": True,
},
"is_available": {
"read_only": True,
},
"email_address": {"required": False, "write_only": True},
"email_ccdl_ok": {"required": False, "write_only": True},
"notify_me": {"required": False, "write_only": True},
"expires_on": {
"read_only": True,
},
"s3_bucket": {
"read_only": True,
},
"s3_key": {
"read_only": True,
},
"success": {
"read_only": True,
},
"failure_reason": {
"read_only": True,
},
"created_at": {
"read_only": True,
},
"last_modified": {
"read_only": True,
},
"size_in_bytes": {
"read_only": True,
},
"sha1": {
"read_only": True,
},
"download_url": {
"read_only": True,
},
"worker_version": {
"read_only": True,
"help_text": "Returns the latest version of refine.bio that was used to build this dataset.",
},
}
def validate(self, data):
"""
Ensure this is something we want in our dataset.
"""
validate_dataset(data)
return data
def get_organism_samples(self, obj):
"""
Groups the sample accession codes inside a dataset by their organisms, eg:
{ HOMO_SAPIENS: [S1, S2], DANIO: [S3] }
Useful to avoid sending sample information on the downloads page
"""
samples = (
obj.get_samples()
.prefetch_related("organism")
.values("organism__name", "accession_code")
.order_by("organism__name", "accession_code")
)
result = defaultdict(list)
for sample in samples:
result[sample["organism__name"]].append(sample["accession_code"])
return result
def get_worker_version(self, obj):
processor_jobs = obj.processor_jobs.order_by("-created_at").values_list(
"worker_version", flat=True
)
if processor_jobs:
return processor_jobs[0]
else:
return None
@method_decorator(
name="retrieve",
decorator=swagger_auto_schema(
operation_description="View a single Dataset.",
manual_parameters=[
openapi.Parameter(
name="details",
in_=openapi.IN_QUERY,
type=openapi.TYPE_BOOLEAN,
description="When set to `True`, additional fields will be included in the response with details about the experiments in the dataset. This is used mostly on the dataset page in www.refine.bio",
)
],
),
)
@method_decorator(
name="update",
decorator=swagger_auto_schema(
operation_description="""
Modify an existing Dataset.
In order to begin smashing, an activated API key must be provided in the `API-KEY` header field of the request.
To acquire and activate an API key see the documentation for the [/token](#tag/token)
endpoint.
```py
import requests
import json
params = json.dumps({
'data': data,
'aggregate_by': 'EXPERIMENT',
'start': True,
'email_address': 'refinebio@gmail.com'
})
headers = {
'Content-Type': 'application/json',
'API-KEY': token_id # requested from /token
}
requests.put(host + '/v1/dataset/38879729-93c8-436d-9293-b95d3f274741/', params, headers=headers)
```
"""
),
)
class DatasetView(
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet,
):
"""View and modify a single Dataset."""
queryset = Dataset.objects.all()
serializer_class = DatasetSerializer
lookup_field = "id"
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
serializer_context = super(DatasetView, self).get_serializer_context()
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
token = APIToken.objects.get(id=token_id, is_activated=True)
return {**serializer_context, "token": token}
except (APIToken.DoesNotExist, ValidationError):
return serializer_context
def validate_token(self):
# Make sure we have a valid activated token.
token_id = self.request.data.get("token_id", None)
if not token_id:
token_id = self.request.META.get("HTTP_API_KEY", None)
try:
APIToken.objects.get(id=token_id, is_activated=True)
except (APIToken.DoesNotExist, ValidationError):
raise BadRequest(
message="You must provide an active API token ID", error_type="invalid_token"
)
@staticmethod
def convert_ALL_to_accessions(data):
qn_organisms = Organism.get_objects_with_qn_targets()
for key in data["data"].keys():
accessions = data["data"][key]
if accessions == ["ALL"]:
experiment = get_object_or_404(Experiment, accession_code=key)
sample_codes = list(
experiment.samples.filter(
is_processed=True, organism__in=qn_organisms
).values_list("accession_code", flat=True)
)
data["data"][key] = sample_codes
def validate_email_address_is_nonempty(self):
"""Check to make sure the email exists. We call this when getting ready to dispatch a dataset"""
supplied_email_address = self.request.data.get("email_address", None)
if supplied_email_address is None or supplied_email_address == "":
raise BadRequest(
message="You must provide an email address.", error_type="invalid_email"
)
def dispatch_job(self, serializer, obj):
processor_job = ProcessorJob()
processor_job.pipeline_applied = "SMASHER"
processor_job.ram_amount = 4096
processor_job.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = processor_job
pjda.dataset = obj
pjda.save()
job_sent = False
try:
# Hidden method of non-dispatching for testing purposes.
if not self.request.data.get("no_send_job", False):
job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
else:
# We didn't actually send it, but we also didn't want to.
job_sent = True
except Exception as e:
# Just log whatever exception happens, because the foreman wil requeue the job anyway
logger.error(e)
if not job_sent:
raise APIException(
"Unable to queue download job. Something has gone"
" wrong and we have been notified about it."
)
serializer.validated_data["is_processing"] = True
obj = serializer.save()
# create a new dataset annotation with the information of this request
annotation = DatasetAnnotation()
annotation.dataset = obj
annotation.data = {
"start": True,
"ip": get_client_ip(self.request),
"user_agent": self.request.META.get("HTTP_USER_AGENT", None),
}
annotation.save()
def create_or_update(self, serializer):
"""If `start` is set, fire off the job. Otherwise just create/update the dataset"""
data = serializer.validated_data
DatasetView.convert_ALL_to_accessions(data)
if data.get("start"):
self.validate_token()
self.validate_email_address_is_nonempty()
obj = serializer.save()
self.dispatch_job(serializer, obj)
else:
serializer.save()
def perform_create(self, serializer):
# Since we are creating a new dataset, there is no way it is already processed
self.create_or_update(serializer)
def perform_update(self, serializer):
# Check to make sure we have not already processed the dataset
old_object = self.get_object()
if old_object.is_processed:
raise BadRequest(
message="You may not update Datasets which have already been processed"
)
# Don't allow critical data updates to jobs that have already been submitted,
# but do allow email address updating.
elif old_object.is_processing:
self.validate_email_address_is_nonempty()
serializer.validated_data["data"] = old_object.data
serializer.validated_data["aggregate_by"] = old_object.aggregate_by
serializer.save()
else:
self.create_or_update(serializer)
<|code_end|>
|
Cache Docker Images by Branch
### Context
We want to be able to cache docker image layers that are created locally as testing artifacts, to be used by github actions.
The current prepare_images.sh does this but there was an issue with the definition for branch_name.
We also don't want to remove support for non-ccdl members developing locally.

### Solution or next step
- After #3285 is merged, we should set sensible defaults that can be overridden for external contributors.
- Get current branch name or tag to be set when pushing images to ccdl(staging) repo.
Determine:
- If they don't have access to the docker repo should we just build locally and not push?
- How long can docker tags be / are they compatible with our longer branch names.
| common/setup.py
<|code_start|>import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
VERSION_FILE = "version"
try:
with open(VERSION_FILE, "rt") as version_file:
version_string = version_file.read().strip().split("-")[0]
except OSError:
print(
"Cannot read version to determine System Version."
" Please create a file common/version containing an up to date System Version."
)
raise
setup(
name="data-refinery-common",
version=version_string,
packages=find_packages(),
include_package_data=True,
# These values are based on what is in common/requirements.txt.
install_requires=[
"boto3>=1.9.16",
"coverage>=4.5.1",
"daiquiri>=1.5.0",
"django>=3.2,<4",
"raven>=6.9.0",
"requests>=2.10.1",
"retrying>=1.3.3",
"psycopg2-binary>=2.7.5",
],
license="BSD License",
description="Common functionality to be shared between Data Refinery sub-projects.",
url="https://www.greenelab.com",
author="Kurt Wheeler",
author_email="team@greenelab.com",
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: Ubuntu",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Internet :: WWW/HTTP",
],
)
<|code_end|>
| common/setup.py
<|code_start|>import os
import re
from datetime import datetime
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
VERSION_FILE = "version"
try:
with open(VERSION_FILE, "rt") as version_file:
version_string = version_file.read().strip().split("-")[0]
except OSError:
print(
"Cannot read version file to determine system version. "
"Please create a file common/version containing an up to date system version."
)
raise
version_re = re.compile(
r"^([1-9][0-9]*!)?(0|[1-9][0-9]*)"
"(\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))"
"?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?$"
)
if not version_re.match(version_string):
# Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.
now = datetime.now()
version_string = f"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}"
setup(
name="data-refinery-common",
version=version_string,
packages=find_packages(),
include_package_data=True,
# These values are based on what is in common/requirements.txt.
install_requires=[
"boto3>=1.9.16",
"coverage>=4.5.1",
"daiquiri>=1.5.0",
"django>=3.2,<4",
"raven>=6.9.0",
"requests>=2.10.1",
"retrying>=1.3.3",
"psycopg2-binary>=2.7.5",
],
license="BSD License",
description="Common functionality to be shared between Data Refinery sub-projects.",
url="https://www.greenelab.com",
author="Kurt Wheeler",
author_email="team@greenelab.com",
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: Ubuntu",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Internet :: WWW/HTTP",
],
)
<|code_end|>
|
Rename 'Refine.bio Mail Robot' to something descriptive
### Context
'Refine.bio Mail Robot' isn't super descriptive of what we use that email for. We use it to provide information about the datasets - processed or failed and the name should reflect that.
Also am a little concerned of the word robot getting it flagged and pushed into a non-primary inbox folder.
### Problem or idea
Rename it `refine.bio Datasets` (yes, lower case r)
### Solution or next step
@davidsmejia Is there anything else we use that email for?
| workers/data_refinery_workers/processors/smasher.py
<|code_start|># -*- coding: utf-8 -*-
import logging
import os
import shutil
import time
from datetime import timedelta
from pathlib import Path
from typing import Dict, List
from urllib.parse import quote
from django.conf import settings
from django.utils import timezone
import boto3
import pandas as pd
import psutil
import requests
from botocore.exceptions import ClientError
from sklearn import preprocessing
from data_refinery_common.enums import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputedFile, Pipeline
from data_refinery_common.utils import calculate_file_size, calculate_sha1, get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
AWS_REGION = get_env_variable(
"AWS_REGION", "us-east-1"
) # Default to us-east-1 if the region variable can't be found
BODY_HTML = (
Path("data_refinery_workers/processors/smasher_email.min.html").read_text().replace("\n", "")
)
BODY_ERROR_HTML = (
Path("data_refinery_workers/processors/smasher_email_error.min.html")
.read_text()
.replace("\n", "")
)
BYTES_IN_GB = 1024 * 1024 * 1024
logger = get_and_configure_logger(__name__)
### DEBUG ###
logger.setLevel(logging.getLevelName("DEBUG"))
PROCESS_POOL_SIZE = max(1, int(psutil.cpu_count() / 2 - 1))
SCALERS = {
"MINMAX": preprocessing.MinMaxScaler,
"STANDARD": preprocessing.StandardScaler,
"ROBUST": preprocessing.RobustScaler,
}
def log_state(message, job_id, start_time=False):
    """Log `message` with CPU/RAM stats when DEBUG logging is enabled.

    If `start_time` is falsy, returns a new timestamp (time.time()) so the
    caller can later pass it back to log a duration; otherwise logs the
    elapsed time since `start_time` and returns None.
    """
    # The source had its indentation stripped; structure restored here.
    if logger.isEnabledFor(logging.DEBUG):
        process = psutil.Process(os.getpid())
        ram_in_GB = process.memory_info().rss / BYTES_IN_GB
        logger.debug(message, total_cpu=psutil.cpu_percent(), process_ram=ram_in_GB, job_id=job_id)
        if start_time:
            logger.debug("Duration: %s" % (time.time() - start_time), job_id=job_id)
        else:
            return time.time()
def _inner_join(job_context: Dict) -> pd.DataFrame:
    """Performs an inner join across the all_frames key of job_context.

    Returns a dataframe, not the job_context.

    TODO: This function should be mostly unnecessary now because we
    pretty much do this in the smashing utils but I don't want to rip
    it out right now .
    """
    # Merge all of the frames we've gathered into a single big frame, skipping duplicates.
    # TODO: If the very first frame is the wrong platform, are we boned?
    merged = job_context["all_frames"][0]
    i = 1
    old_len_merged = len(merged)
    merged_backup = merged  # kept so a merge that empties the frame can be rolled back

    while i < len(job_context["all_frames"]):
        frame = job_context["all_frames"][i]
        i = i + 1

        if i % 1000 == 0:
            logger.info("Smashing keyframe", i=i, job_id=job_context["job"].id)

        # I'm not sure where these are sneaking in from, but we don't want them.
        # Related: https://github.com/AlexsLemonade/refinebio/issues/390
        breaker = False
        for column in frame.columns:
            if column in merged.columns:
                breaker = True

        if breaker:
            logger.warning(
                "Column repeated for smash job!",
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
                column=column,
            )
            continue

        # This is the inner join, the main "Smash" operation
        merged = merged.merge(frame, how="inner", left_index=True, right_index=True)

        new_len_merged = len(merged)
        if new_len_merged < old_len_merged:
            logger.warning(
                "Dropped rows while smashing!",
                dataset_id=job_context["dataset"].id,
                old_len_merged=old_len_merged,
                new_len_merged=new_len_merged,
            )
        if new_len_merged == 0:
            logger.warning(
                "Skipping a bad merge frame!",
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
                old_len_merged=old_len_merged,
                new_len_merged=new_len_merged,
                bad_frame_number=i,
            )
            # Roll back to the pre-merge frame and record the bad sample.
            merged = merged_backup
            new_len_merged = len(merged)
            try:
                job_context["unsmashable_files"].append(frame.columns[0])
            except Exception:
                # Something is really, really wrong with this frame.
                pass

        old_len_merged = len(merged)
        merged_backup = merged

    return merged
def process_frames_for_key(key: str, input_files: List[ComputedFile], job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in `input_files`.

    Will add to job_context the key 'all_frames', a list of pandas
    dataframes containing all the samples' data. Also adds the key
    'unsmashable_files' containing a list of paths that were
    determined to be unsmashable.
    """
    job_context["original_merged"] = pd.DataFrame()
    start_all_frames = log_state(
        "Building list of all_frames key {}".format(key), job_context["job"].id
    )

    job_context["all_frames"] = []
    for (computed_file, sample) in input_files:
        frame_data = smashing_utils.process_frame(
            job_context["work_dir"],
            computed_file,
            sample.accession_code,
            job_context["dataset"].aggregate_by,
        )
        if frame_data is not None:
            job_context["all_frames"].append(frame_data)
        else:
            # None means the file couldn't be read/parsed; track it so it
            # can be reported in the dataset metadata.
            logger.warning(
                "Unable to smash file",
                computed_file=computed_file.id,
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
            )
            job_context["unsmashable_files"].append(computed_file.filename)

    log_state(
        "Finished building list of all_frames key {}".format(key),
        job_context["job"].id,
        start_all_frames,
    )
    return job_context
def _smash_key(job_context: Dict, key: str, input_files: List[ComputedFile]) -> Dict:
    """Smash all of the input files together for a given key.

    Steps:
        Combine common genes (pandas merge)
        Transpose such that genes are columns (features)
        Scale features with sci-kit learn
        Transpose again such that samples are columns and genes are rows
    """
    start_smash = log_state("start _smash_key for {}".format(key), job_context["job"].id)

    # Check if we need to copy the quant.sf files
    if job_context["dataset"].quant_sf_only:
        outfile_dir = job_context["output_dir"] + key + "/"
        os.makedirs(outfile_dir, exist_ok=True)
        samples = [sample for (_, sample) in input_files]
        job_context["num_samples"] += smashing_utils.sync_quant_files(
            outfile_dir, samples, job_context["filtered_samples"]
        )
        # we ONLY want to give quant sf files to the user if that's what they requested
        return job_context

    job_context = process_frames_for_key(key, input_files, job_context)

    if len(job_context["all_frames"]) < 1:
        logger.error(
            "Was told to smash a key with no frames!", job_id=job_context["job"].id, key=key
        )
        # TODO: is this the proper way to handle this? I can see us
        # not wanting to fail an entire dataset because one experiment
        # had a problem, but I also think it could be problematic to
        # just skip an experiment and pretend nothing went wrong.
        return job_context

    merged = _inner_join(job_context)
    job_context["original_merged"] = merged
    log_state("end build all frames", job_context["job"].id, start_smash)
    start_qn = log_state("start qn", job_context["job"].id, start_smash)

    # Quantile Normalization
    if job_context["dataset"].quantile_normalize:
        job_context["merged_no_qn"] = merged
        job_context["organism"] = job_context["dataset"].get_samples()[0].organism
        job_context = smashing_utils.quantile_normalize(job_context)
        merged = job_context.get("merged_qn", None)
    # End QN
    log_state("end qn", job_context["job"].id, start_qn)

    # Transpose before scaling
    transposed = merged.transpose()
    start_scaler = log_state("starting scaler", job_context["job"].id)

    # Scaler
    if job_context["dataset"].scale_by != "NONE":
        scale_funtion = SCALERS[job_context["dataset"].scale_by]
        scaler = scale_funtion(copy=True)
        scaler.fit(transposed)
        scaled = pd.DataFrame(
            scaler.transform(transposed), index=transposed.index, columns=transposed.columns
        )
        # Untranspose
        untransposed = scaled.transpose()
    else:
        # Wheeeeeeeeeee
        untransposed = transposed.transpose()
    log_state("end scaler", job_context["job"].id, start_scaler)

    # This is just for quality assurance in tests.
    job_context["final_frame"] = untransposed

    # Normalize the Header format
    untransposed.index.rename("Gene", inplace=True)

    outfile_dir = job_context["output_dir"] + key + "/"
    os.makedirs(outfile_dir, exist_ok=True)
    outfile = outfile_dir + key + ".tsv"
    job_context["smash_outfile"] = outfile
    untransposed.to_csv(outfile, sep="\t", encoding="utf-8")

    log_state("end _smash_key for {}".format(key), job_context["job"].id, start_smash)
    return job_context
def _smash_all(job_context: Dict) -> Dict:
    """Perform smashing on all species/experiments in the dataset."""
    start_smash = log_state("start smash", job_context["job"].id)
    job_context["unsmashable_files"] = []
    job_context["num_samples"] = 0

    # Smash all of the sample sets
    logger.debug(
        "About to smash!",
        dataset_count=len(job_context["dataset"].data),
        job_id=job_context["job"].id,
    )

    try:
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop("input_files").items():
            job_context = _smash_key(job_context, key, input_files)
    except Exception as e:
        raise utils.ProcessorJobError(
            "Could not smash dataset: " + str(e),
            success=False,
            dataset_id=job_context["dataset"].id,
            num_input_files=job_context["num_input_files"],
        )

    smashing_utils.write_non_data_files(job_context)

    # Finally, compress all files into a zip
    final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
    try:
        shutil.make_archive(final_zip_base, "zip", job_context["output_dir"])
    except OSError:
        raise utils.ProcessorJobError("Smash Error while generating zip file", success=False)

    job_context["output_file"] = final_zip_base + ".zip"
    job_context["dataset"].success = True
    job_context["dataset"].save()
    logger.debug("Created smash output!", archive_location=job_context["output_file"])

    log_state("end smash", job_context["job"].id, start_smash)
    return job_context
def _upload(job_context: Dict) -> Dict:
    """Uploads the result file to S3 and notifies user."""
    # Only upload when requested and when actually running in the cloud.
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context

    s3_client = boto3.client("s3")
    output_filename = job_context["output_file"].split("/")[-1]
    try:
        # Note that file expiry is handled by the S3 object lifecycle,
        # managed by terraform.
        s3_client.upload_file(
            job_context["output_file"],
            RESULTS_BUCKET,
            output_filename,
        )
    except Exception:
        raise utils.ProcessorJobError(
            "Failed to upload smash result file.", success=False, file=job_context["output_file"]
        )

    result_url = "https://s3.amazonaws.com/" + RESULTS_BUCKET + "/" + output_filename
    job_context["result_url"] = result_url
    logger.debug("Result uploaded!", result_url=job_context["result_url"])
    return job_context
def _notify(job_context: Dict) -> Dict:
    """Use AWS SES to notify a user of a smash result.."""
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context

    # Send a notification to slack when a dataset fails to be processed
    if job_context["job"].success is False:
        try:
            _notify_slack_failed_dataset(job_context)
        except Exception as e:
            logger.warn(e)  # It doesn't really matter if this didn't work

    # Don't send an email if we don't have address or the user doesn't want an email.
    if job_context["dataset"].email_address and job_context["dataset"].notify_me:
        # Try to send the email.
        try:
            _notify_send_email(job_context)
        # Display an error if something goes wrong.
        except ClientError as e:
            raise utils.ProcessorJobError(
                "ClientError while notifying",
                success=False,
                exc_info=1,
                client_error_message=e.response["Error"]["Message"],
            )
        except Exception:
            raise utils.ProcessorJobError(
                "General failure when trying to send email.",
                success=False,
                exc_info=1,
                result_url=job_context["result_url"],
            )

    # We don't want to retry this dataset after we send a notification to users
    # https://github.com/alexslemonade/refinebio/issues/1944
    job_context["job"].no_retry = True
    job_context["job"].save()

    return job_context
def _notify_slack_failed_dataset(job_context: Dict):
    """Send a slack notification when a dataset fails to smash"""
    # Link to the dataset page, where the user can re-try the download job
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)
    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json={
            "username": "EngagementBot",
            "icon_emoji": ":halal:",
            "attachments": [
                {
                    "fallback": "Dataset failed processing.",
                    "title": "Dataset failed processing",
                    "title_link": dataset_url,
                    "color": "#db3b28",
                    "text": job_context["job"].failure_reason,
                    "fields": [
                        {
                            "title": "Dataset id",
                            "value": str(job_context["dataset"].id),
                            "short": True,
                        },
                        {
                            "title": "Email",
                            "value": job_context["dataset"].email_address,
                            "short": True,
                        },
                    ],
                    "footer": "Refine.bio",
                    "footer_icon": "https://s3.amazonaws.com/refinebio-email/logo-2x.png",
                }
            ],
        },
        headers={"Content-Type": "application/json"},
        timeout=10,
    )
def _notify_send_email(job_context):
    """Send email notification to the user if the dataset succeded or failed."""
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)

    SENDER = "Refine.bio Mail Robot <noreply@refine.bio>"
    RECIPIENT = job_context["dataset"].email_address
    SLACK_CCDL_EMAIL = "z7m4g8w4o6f5e0e7@alexslemonade.slack.com"
    CHARSET = "UTF-8"

    if job_context["job"].success is False:
        SUBJECT = "There was a problem processing your refine.bio dataset :("
        BODY_TEXT = (
            "We tried but were unable to process your requested dataset. Error was: \n\n"
            + str(job_context["job"].failure_reason)
            + "\nDataset ID: "
            + str(job_context["dataset"].id)
            + "\n We have been notified and are looking into the problem. \n\nSorry!"
        )

        # Pre-filled GitHub issue / mailto links for the error email template.
        ERROR_EMAIL_TITLE = quote("I can't download my dataset")
        ERROR_EMAIL_BODY = quote(
            """
[What browser are you using?]
[Add details of the issue you are facing]
---
"""
            + str(job_context["dataset"].id)
        )

        FORMATTED_HTML = (
            BODY_ERROR_HTML.replace("REPLACE_DATASET_URL", dataset_url)
            .replace("REPLACE_ERROR_TEXT", job_context["job"].failure_reason)
            .replace(
                "REPLACE_NEW_ISSUE",
                "https://github.com/AlexsLemonade/refinebio/issues/new?title={0}&body={1}&labels=bug".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
            .replace(
                "REPLACE_MAILTO",
                "mailto:requests@ccdatalab.org?subject={0}&body={1}".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
        )
        job_context["success"] = False
    else:
        SUBJECT = "Your refine.bio Dataset is Ready!"
        BODY_TEXT = "Hot off the presses:\n\n" + dataset_url + "\n\nLove!,\nThe refine.bio Team"
        FORMATTED_HTML = BODY_HTML.replace("REPLACE_DOWNLOAD_URL", dataset_url).replace(
            "REPLACE_DATASET_URL", dataset_url
        )

    # Create a new SES resource and specify a region.
    client = boto3.client("ses", region_name=AWS_REGION)

    # Provide the contents of the email.
    client.send_email(
        Destination={
            "ToAddresses": [
                RECIPIENT,
            ],
            "BccAddresses": [
                SLACK_CCDL_EMAIL,
            ],
        },
        Message={
            "Body": {
                "Html": {
                    "Charset": CHARSET,
                    "Data": FORMATTED_HTML,
                },
                "Text": {
                    "Charset": CHARSET,
                    "Data": BODY_TEXT,
                },
            },
            "Subject": {
                "Charset": CHARSET,
                "Data": SUBJECT,
            },
        },
        Source=SENDER,
    )
def _update_result_objects(job_context: Dict) -> Dict:
    """Closes out the dataset object."""
    dataset = job_context["dataset"]
    dataset.s3_bucket = RESULTS_BUCKET
    dataset.s3_key = job_context["output_file"].split("/")[-1]
    dataset.size_in_bytes = calculate_file_size(job_context["output_file"])
    dataset.sha1 = calculate_sha1(job_context["output_file"])
    dataset.is_processing = False
    dataset.is_processed = True
    dataset.is_available = True
    dataset.expires_on = timezone.now() + timedelta(days=7)
    dataset.save()

    if settings.RUNNING_IN_CLOUD and job_context.get("upload", True):
        # File is uploaded and the metadata is updated, can delete the local.
        try:
            os.remove(job_context["output_file"])
        except OSError:
            pass

    job_context["success"] = True
    return job_context
def smash(job_id: int, upload=True) -> None:
    """Main Smasher interface"""
    pipeline = Pipeline(name=PipelineEnum.SMASHER.value)
    job_context = utils.run_pipeline(
        {"job_id": job_id, "upload": upload, "pipeline": pipeline},
        [
            utils.start_job,
            smashing_utils.prepare_files,
            _smash_all,
            _upload,
            _update_result_objects,
            utils.end_job,
        ],
    )
    # ensure that `notify` is always called so that users get emails in case processing fails or succeeds
    job_context = _notify(job_context)
    return job_context
<|code_end|>
| workers/data_refinery_workers/processors/smasher.py
<|code_start|># -*- coding: utf-8 -*-
import logging
import os
import shutil
import time
from datetime import timedelta
from pathlib import Path
from typing import Dict, List
from urllib.parse import quote
from django.conf import settings
from django.utils import timezone
import boto3
import pandas as pd
import psutil
import requests
from botocore.exceptions import ClientError
from sklearn import preprocessing
from data_refinery_common.enums import PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import ComputedFile, Pipeline
from data_refinery_common.utils import calculate_file_size, calculate_sha1, get_env_variable
from data_refinery_workers.processors import smashing_utils, utils
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
AWS_REGION = get_env_variable(
"AWS_REGION", "us-east-1"
) # Default to us-east-1 if the region variable can't be found
BODY_HTML = (
Path("data_refinery_workers/processors/smasher_email.min.html").read_text().replace("\n", "")
)
BODY_ERROR_HTML = (
Path("data_refinery_workers/processors/smasher_email_error.min.html")
.read_text()
.replace("\n", "")
)
BYTES_IN_GB = 1024 * 1024 * 1024
logger = get_and_configure_logger(__name__)
### DEBUG ###
logger.setLevel(logging.getLevelName("DEBUG"))
PROCESS_POOL_SIZE = max(1, int(psutil.cpu_count() / 2 - 1))
SCALERS = {
"MINMAX": preprocessing.MinMaxScaler,
"STANDARD": preprocessing.StandardScaler,
"ROBUST": preprocessing.RobustScaler,
}
def log_state(message, job_id, start_time=False):
    """Log `message` with CPU/RAM stats when DEBUG logging is enabled.

    If `start_time` is falsy, returns a new timestamp (time.time()) so the
    caller can later pass it back to log a duration; otherwise logs the
    elapsed time since `start_time` and returns None.
    """
    # The source had its indentation stripped; structure restored here.
    if logger.isEnabledFor(logging.DEBUG):
        process = psutil.Process(os.getpid())
        ram_in_GB = process.memory_info().rss / BYTES_IN_GB
        logger.debug(message, total_cpu=psutil.cpu_percent(), process_ram=ram_in_GB, job_id=job_id)
        if start_time:
            logger.debug("Duration: %s" % (time.time() - start_time), job_id=job_id)
        else:
            return time.time()
def _inner_join(job_context: Dict) -> pd.DataFrame:
    """Performs an inner join across the all_frames key of job_context.

    Returns a dataframe, not the job_context.

    TODO: This function should be mostly unnecessary now because we
    pretty much do this in the smashing utils but I don't want to rip
    it out right now .
    """
    # Merge all of the frames we've gathered into a single big frame, skipping duplicates.
    # TODO: If the very first frame is the wrong platform, are we boned?
    merged = job_context["all_frames"][0]
    i = 1
    old_len_merged = len(merged)
    merged_backup = merged  # kept so a merge that empties the frame can be rolled back

    while i < len(job_context["all_frames"]):
        frame = job_context["all_frames"][i]
        i = i + 1

        if i % 1000 == 0:
            logger.info("Smashing keyframe", i=i, job_id=job_context["job"].id)

        # I'm not sure where these are sneaking in from, but we don't want them.
        # Related: https://github.com/AlexsLemonade/refinebio/issues/390
        breaker = False
        for column in frame.columns:
            if column in merged.columns:
                breaker = True

        if breaker:
            logger.warning(
                "Column repeated for smash job!",
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
                column=column,
            )
            continue

        # This is the inner join, the main "Smash" operation
        merged = merged.merge(frame, how="inner", left_index=True, right_index=True)

        new_len_merged = len(merged)
        if new_len_merged < old_len_merged:
            logger.warning(
                "Dropped rows while smashing!",
                dataset_id=job_context["dataset"].id,
                old_len_merged=old_len_merged,
                new_len_merged=new_len_merged,
            )
        if new_len_merged == 0:
            logger.warning(
                "Skipping a bad merge frame!",
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
                old_len_merged=old_len_merged,
                new_len_merged=new_len_merged,
                bad_frame_number=i,
            )
            # Roll back to the pre-merge frame and record the bad sample.
            merged = merged_backup
            new_len_merged = len(merged)
            try:
                job_context["unsmashable_files"].append(frame.columns[0])
            except Exception:
                # Something is really, really wrong with this frame.
                pass

        old_len_merged = len(merged)
        merged_backup = merged

    return merged
def process_frames_for_key(key: str, input_files: List[ComputedFile], job_context: Dict) -> Dict:
    """Download, read, and chunk processed sample files from s3.

    `key` is the species or experiment whose samples are contained in `input_files`.

    Will add to job_context the key 'all_frames', a list of pandas
    dataframes containing all the samples' data. Also adds the key
    'unsmashable_files' containing a list of paths that were
    determined to be unsmashable.
    """
    job_context["original_merged"] = pd.DataFrame()
    start_all_frames = log_state(
        "Building list of all_frames key {}".format(key), job_context["job"].id
    )

    job_context["all_frames"] = []
    for (computed_file, sample) in input_files:
        frame_data = smashing_utils.process_frame(
            job_context["work_dir"],
            computed_file,
            sample.accession_code,
            job_context["dataset"].aggregate_by,
        )
        if frame_data is not None:
            job_context["all_frames"].append(frame_data)
        else:
            # None means the file couldn't be read/parsed; track it so it
            # can be reported in the dataset metadata.
            logger.warning(
                "Unable to smash file",
                computed_file=computed_file.id,
                dataset_id=job_context["dataset"].id,
                job_id=job_context["job"].id,
            )
            job_context["unsmashable_files"].append(computed_file.filename)

    log_state(
        "Finished building list of all_frames key {}".format(key),
        job_context["job"].id,
        start_all_frames,
    )
    return job_context
def _smash_key(job_context: Dict, key: str, input_files: List[ComputedFile]) -> Dict:
    """Smash all of the input files together for a given key.

    Steps:
        Combine common genes (pandas merge)
        Transpose such that genes are columns (features)
        Scale features with sci-kit learn
        Transpose again such that samples are columns and genes are rows
    """
    start_smash = log_state("start _smash_key for {}".format(key), job_context["job"].id)

    # Check if we need to copy the quant.sf files
    if job_context["dataset"].quant_sf_only:
        outfile_dir = job_context["output_dir"] + key + "/"
        os.makedirs(outfile_dir, exist_ok=True)
        samples = [sample for (_, sample) in input_files]
        job_context["num_samples"] += smashing_utils.sync_quant_files(
            outfile_dir, samples, job_context["filtered_samples"]
        )
        # we ONLY want to give quant sf files to the user if that's what they requested
        return job_context

    job_context = process_frames_for_key(key, input_files, job_context)

    if len(job_context["all_frames"]) < 1:
        logger.error(
            "Was told to smash a key with no frames!", job_id=job_context["job"].id, key=key
        )
        # TODO: is this the proper way to handle this? I can see us
        # not wanting to fail an entire dataset because one experiment
        # had a problem, but I also think it could be problematic to
        # just skip an experiment and pretend nothing went wrong.
        return job_context

    merged = _inner_join(job_context)
    job_context["original_merged"] = merged
    log_state("end build all frames", job_context["job"].id, start_smash)
    start_qn = log_state("start qn", job_context["job"].id, start_smash)

    # Quantile Normalization
    if job_context["dataset"].quantile_normalize:
        job_context["merged_no_qn"] = merged
        job_context["organism"] = job_context["dataset"].get_samples()[0].organism
        job_context = smashing_utils.quantile_normalize(job_context)
        merged = job_context.get("merged_qn", None)
    # End QN
    log_state("end qn", job_context["job"].id, start_qn)

    # Transpose before scaling
    transposed = merged.transpose()
    start_scaler = log_state("starting scaler", job_context["job"].id)

    # Scaler
    if job_context["dataset"].scale_by != "NONE":
        scale_funtion = SCALERS[job_context["dataset"].scale_by]
        scaler = scale_funtion(copy=True)
        scaler.fit(transposed)
        scaled = pd.DataFrame(
            scaler.transform(transposed), index=transposed.index, columns=transposed.columns
        )
        # Untranspose
        untransposed = scaled.transpose()
    else:
        # Wheeeeeeeeeee
        untransposed = transposed.transpose()
    log_state("end scaler", job_context["job"].id, start_scaler)

    # This is just for quality assurance in tests.
    job_context["final_frame"] = untransposed

    # Normalize the Header format
    untransposed.index.rename("Gene", inplace=True)

    outfile_dir = job_context["output_dir"] + key + "/"
    os.makedirs(outfile_dir, exist_ok=True)
    outfile = outfile_dir + key + ".tsv"
    job_context["smash_outfile"] = outfile
    untransposed.to_csv(outfile, sep="\t", encoding="utf-8")

    log_state("end _smash_key for {}".format(key), job_context["job"].id, start_smash)
    return job_context
def _smash_all(job_context: Dict) -> Dict:
    """Perform smashing on all species/experiments in the dataset."""
    start_smash = log_state("start smash", job_context["job"].id)
    job_context["unsmashable_files"] = []
    job_context["num_samples"] = 0

    # Smash all of the sample sets
    logger.debug(
        "About to smash!",
        dataset_count=len(job_context["dataset"].data),
        job_id=job_context["job"].id,
    )

    try:
        # Once again, `key` is either a species name or an experiment accession
        for key, input_files in job_context.pop("input_files").items():
            job_context = _smash_key(job_context, key, input_files)
    except Exception as e:
        raise utils.ProcessorJobError(
            "Could not smash dataset: " + str(e),
            success=False,
            dataset_id=job_context["dataset"].id,
            num_input_files=job_context["num_input_files"],
        )

    smashing_utils.write_non_data_files(job_context)

    # Finally, compress all files into a zip
    final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
    try:
        shutil.make_archive(final_zip_base, "zip", job_context["output_dir"])
    except OSError:
        raise utils.ProcessorJobError("Smash Error while generating zip file", success=False)

    job_context["output_file"] = final_zip_base + ".zip"
    job_context["dataset"].success = True
    job_context["dataset"].save()
    logger.debug("Created smash output!", archive_location=job_context["output_file"])

    log_state("end smash", job_context["job"].id, start_smash)
    return job_context
def _upload(job_context: Dict) -> Dict:
    """Uploads the result file to S3 and notifies user."""
    # Only upload when requested and when actually running in the cloud.
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context

    s3_client = boto3.client("s3")
    output_filename = job_context["output_file"].split("/")[-1]
    try:
        # Note that file expiry is handled by the S3 object lifecycle,
        # managed by terraform.
        s3_client.upload_file(
            job_context["output_file"],
            RESULTS_BUCKET,
            output_filename,
        )
    except Exception:
        raise utils.ProcessorJobError(
            "Failed to upload smash result file.", success=False, file=job_context["output_file"]
        )

    result_url = "https://s3.amazonaws.com/" + RESULTS_BUCKET + "/" + output_filename
    job_context["result_url"] = result_url
    logger.debug("Result uploaded!", result_url=job_context["result_url"])
    return job_context
def _notify(job_context: Dict) -> Dict:
    """Use AWS SES to notify a user of a smash result.."""
    if not job_context.get("upload", True) or not settings.RUNNING_IN_CLOUD:
        return job_context

    # Send a notification to slack when a dataset fails to be processed
    if job_context["job"].success is False:
        try:
            _notify_slack_failed_dataset(job_context)
        except Exception as e:
            logger.warn(e)  # It doesn't really matter if this didn't work

    # Don't send an email if we don't have address or the user doesn't want an email.
    if job_context["dataset"].email_address and job_context["dataset"].notify_me:
        # Try to send the email.
        try:
            _notify_send_email(job_context)
        # Display an error if something goes wrong.
        except ClientError as e:
            raise utils.ProcessorJobError(
                "ClientError while notifying",
                success=False,
                exc_info=1,
                client_error_message=e.response["Error"]["Message"],
            )
        except Exception:
            raise utils.ProcessorJobError(
                "General failure when trying to send email.",
                success=False,
                exc_info=1,
                result_url=job_context["result_url"],
            )

    # We don't want to retry this dataset after we send a notification to users
    # https://github.com/alexslemonade/refinebio/issues/1944
    job_context["job"].no_retry = True
    job_context["job"].save()

    return job_context
def _notify_slack_failed_dataset(job_context: Dict):
    """Send a slack notification when a dataset fails to smash"""
    # Link to the dataset page, where the user can re-try the download job
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)
    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json={
            "username": "EngagementBot",
            "icon_emoji": ":halal:",
            "attachments": [
                {
                    "fallback": "Dataset failed processing.",
                    "title": "Dataset failed processing",
                    "title_link": dataset_url,
                    "color": "#db3b28",
                    "text": job_context["job"].failure_reason,
                    "fields": [
                        {
                            "title": "Dataset id",
                            "value": str(job_context["dataset"].id),
                            "short": True,
                        },
                        {
                            "title": "Email",
                            "value": job_context["dataset"].email_address,
                            "short": True,
                        },
                    ],
                    "footer": "Refine.bio",
                    "footer_icon": "https://s3.amazonaws.com/refinebio-email/logo-2x.png",
                }
            ],
        },
        headers={"Content-Type": "application/json"},
        timeout=10,
    )
def _notify_send_email(job_context):
    """Send email notification to the user if the dataset succeded or failed."""
    dataset_url = "https://www.refine.bio/dataset/" + str(job_context["dataset"].id)

    SENDER = "refine.bio Datasets <noreply@refine.bio>"
    RECIPIENT = job_context["dataset"].email_address
    SLACK_CCDL_EMAIL = "z7m4g8w4o6f5e0e7@alexslemonade.slack.com"
    CHARSET = "UTF-8"

    if job_context["job"].success is False:
        SUBJECT = "There was a problem processing your refine.bio dataset :("
        BODY_TEXT = (
            "We tried but were unable to process your requested dataset. Error was: \n\n"
            + str(job_context["job"].failure_reason)
            + "\nDataset ID: "
            + str(job_context["dataset"].id)
            + "\n We have been notified and are looking into the problem. \n\nSorry!"
        )

        # Pre-filled GitHub issue / mailto links for the error email template.
        ERROR_EMAIL_TITLE = quote("I can't download my dataset")
        ERROR_EMAIL_BODY = quote(
            """
[What browser are you using?]
[Add details of the issue you are facing]
---
"""
            + str(job_context["dataset"].id)
        )

        FORMATTED_HTML = (
            BODY_ERROR_HTML.replace("REPLACE_DATASET_URL", dataset_url)
            .replace("REPLACE_ERROR_TEXT", job_context["job"].failure_reason)
            .replace(
                "REPLACE_NEW_ISSUE",
                "https://github.com/AlexsLemonade/refinebio/issues/new?title={0}&body={1}&labels=bug".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
            .replace(
                "REPLACE_MAILTO",
                "mailto:requests@ccdatalab.org?subject={0}&body={1}".format(
                    ERROR_EMAIL_TITLE, ERROR_EMAIL_BODY
                ),
            )
        )
        job_context["success"] = False
    else:
        SUBJECT = "Your refine.bio Dataset is Ready!"
        BODY_TEXT = "Hot off the presses:\n\n" + dataset_url + "\n\nLove!,\nThe refine.bio Team"
        FORMATTED_HTML = BODY_HTML.replace("REPLACE_DOWNLOAD_URL", dataset_url).replace(
            "REPLACE_DATASET_URL", dataset_url
        )

    # Create a new SES resource and specify a region.
    client = boto3.client("ses", region_name=AWS_REGION)

    # Provide the contents of the email.
    client.send_email(
        Destination={
            "ToAddresses": [
                RECIPIENT,
            ],
            "BccAddresses": [
                SLACK_CCDL_EMAIL,
            ],
        },
        Message={
            "Body": {
                "Html": {
                    "Charset": CHARSET,
                    "Data": FORMATTED_HTML,
                },
                "Text": {
                    "Charset": CHARSET,
                    "Data": BODY_TEXT,
                },
            },
            "Subject": {
                "Charset": CHARSET,
                "Data": SUBJECT,
            },
        },
        Source=SENDER,
    )
def _update_result_objects(job_context: Dict) -> Dict:
    """Closes out the dataset object."""
    dataset = job_context["dataset"]
    dataset.s3_bucket = RESULTS_BUCKET
    dataset.s3_key = job_context["output_file"].split("/")[-1]
    dataset.size_in_bytes = calculate_file_size(job_context["output_file"])
    dataset.sha1 = calculate_sha1(job_context["output_file"])
    dataset.is_processing = False
    dataset.is_processed = True
    dataset.is_available = True
    dataset.expires_on = timezone.now() + timedelta(days=7)
    dataset.save()

    if settings.RUNNING_IN_CLOUD and job_context.get("upload", True):
        # File is uploaded and the metadata is updated, can delete the local.
        try:
            os.remove(job_context["output_file"])
        except OSError:
            pass

    job_context["success"] = True
    return job_context
def smash(job_id: int, upload=True) -> None:
    """Main Smasher interface"""
    pipeline = Pipeline(name=PipelineEnum.SMASHER.value)
    job_context = utils.run_pipeline(
        {"job_id": job_id, "upload": upload, "pipeline": pipeline},
        [
            utils.start_job,
            smashing_utils.prepare_files,
            _smash_all,
            _upload,
            _update_result_objects,
            utils.end_job,
        ],
    )
    # ensure that `notify` is always called so that users get emails in case processing fails or succeeds
    job_context = _notify(job_context)
    return job_context
<|code_end|>
|
Clean up AWS Batch job definition list
### Problem or idea
The Batch job definition section contains 100+ pages of jobs (~2500 items). They haven't been cleaned up properly during the deploy process for a while.
### Solution or next step
Clean up stale items, make sure job deregistering script takes care of old job definitions in a right way.
| infrastructure/delete_batch_job_queue.py
<|code_start|>import os
from time import sleep
import boto3
# Region and the comma-separated list of queue names come from the deploy environment.
AWS_REGION = os.environ["AWS_REGION"]
AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
batch = boto3.client("batch", region_name=AWS_REGION)
# First disable each job queue.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
    try:
        batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
    except Exception as e:
        # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
        # NOTE(review): the bound exception is unused and this broad except also
        # swallows real API errors, not just "queue does not exist".
        pass
# Then wait for each one to be disabled so it can be deleted.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
    while True:
        job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])
        if "jobQueues" in job_queues:
            job_queue = job_queues["jobQueues"][0]
            # Deletion is only valid once the queue is DISABLED and no longer UPDATING.
            if job_queue["state"] == "DISABLED" and job_queue["status"] != "UPDATING":
                break
        else:
            print(f"Unexpected response while describing job queue {batch_queue_name}.")
            break
        # Poll every 3 seconds until AWS finishes the state transition.
        sleep(3)
    batch.delete_job_queue(jobQueue=batch_queue_name)
<|code_end|>
infrastructure/deregister_batch_job_definitions.py
<|code_start|>import os
import boto3
AWS_REGION = os.environ["AWS_REGION"]
batch = boto3.client("batch", region_name=AWS_REGION)
# TODO: stop repeating this construction everywhere. Just set it once somewhere.
JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
job_definition_files = os.listdir("batch-job-templates")
# One job definition name per template file, e.g. "<user>_<stage>_SMASHER".
job_definition_list = list(
    {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
)
# Have to go one by one because providing a list of job names doesn't work:
# https://github.com/boto/boto3/issues/2908
for job_definition in job_definition_list:
    # NOTE(review): describe_job_definitions is paginated; without following
    # nextToken, revisions beyond the first page are never deregistered —
    # confirm against the boto3 Batch docs.
    job_definitions = batch.describe_job_definitions(
        jobDefinitionName=job_definition, status="ACTIVE"
    )
    # There can be multiple revisions per job definition. We want them all gone.
    for job_definition_revision in job_definitions["jobDefinitions"]:
        batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
<|code_end|>
| infrastructure/delete_batch_job_queue.py
<|code_start|>import os
from time import sleep
import boto3
from botocore.exceptions import ClientError
AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# First disable each job queue.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
    try:
        batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
    except ClientError as e:
        # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
        # Any other API error is unexpected and should surface.
        if str(e).endswith(" does not exist."):
            pass
        else:
            raise e
# Then wait for each one to be disabled so it can be deleted.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
    while True:
        job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])
        if "jobQueues" in job_queues:
            job_queue = job_queues["jobQueues"][0]
            # Deletion is only valid once the queue is DISABLED and no longer UPDATING.
            if job_queue["state"] == "DISABLED" and job_queue["status"] != "UPDATING":
                break
        else:
            print(f"Unexpected response while describing job queue {batch_queue_name}.")
            break
        # Poll every 3 seconds until AWS finishes the state transition.
        sleep(3)
    batch.delete_job_queue(jobQueue=batch_queue_name)
<|code_end|>
infrastructure/deregister_batch_job_definitions.py
<|code_start|>import os
import boto3
batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# TODO: stop repeating this construction everywhere. Just set it once somewhere.
JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
# One job definition name per template file, e.g. "<user>_<stage>_SMASHER".
job_names = (
    JOB_DEFINITION_PREFIX + batch_job_template.upper().split(".")[0]
    for batch_job_template in os.listdir("batch-job-templates")
)
nextToken = ""
# Have to go one by one because providing a list of job names doesn't work:
# https://github.com/boto/boto3/issues/2908
for job_name in sorted(job_names):
    # Page through all ACTIVE revisions of this job definition.
    while True:
        data = {
            "jobDefinitionName": job_name,
            "maxResults": 100,
            "status": "ACTIVE",
        }
        # NOTE(review): nextToken is shared across job names; if a loop ever
        # exits with a non-empty token it leaks into the next job's first
        # request — confirm this cannot happen with the Batch API.
        if nextToken:
            data["nextToken"] = nextToken
        response = batch.describe_job_definitions(**data)
        nextToken = response.get("nextToken", "")
        job_definitions = response.get("jobDefinitions")
        if not job_definitions:
            break
        # There can be multiple revisions per job definition. We want them all gone.
        for job_definition in job_definitions:
            batch.deregister_job_definition(jobDefinition=job_definition["jobDefinitionArn"])
<|code_end|>
|
Weekly stats shows 0 downloads for some users
### Context
A lot of changes has gone into prod recently. One of them is the EngagementBot weekly stats updates.
### Problem or idea
The most recent summary contains lines like "0 downloads from " indicating some (potential) stats inaccuracy.
### Solution or next step
Figure out why the value is empty and fix the issue. If everything is right then just hide those 0 downloads items.
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from collections import Counter
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template.defaultfilters import pluralize
from django.utils import timezone
import requests
from data_refinery_common.models import Dataset, DatasetAnnotation
class Command(BaseCommand):
    """Django management command that posts a downloads summary to Slack."""
    help = "Post downloads summary to Slack"
    def add_arguments(self, parser):
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # Default to a week.
            help=("Number of days in the past for which to build the stats."),
        )
        parser.add_argument(
            "--top-countries",
            type=int,
            default=5,
            help=("Number of countries to show in the per country downloads summary."),
        )
    def handle(self, *args, **options):
        # Delegate to the module-level function.
        post_downloads_summary(options["days"], options["channel"], options["top_countries"])
def format_user_data(header, data):
    """
    Formats user email, downloads count, location information sorted
    by downloads count.

    `data` is an iterable of (email, downloads, location) tuples; returns
    a newline-joined string with `header` as the first line.
    """
    # Allowed overhead for 2 column sorting: downloads count, email.
    # Sorting by email first means ties in the (stable) downloads sort
    # below keep alphabetical order.
    lines = sorted(data, key=lambda u: u[0].lower())
    lines = [
        f"{email.lower()} | {downloads} download{pluralize(downloads)} from {location}"
        for email, downloads, location in sorted(lines, key=lambda u: u[1], reverse=True)
    ]
    lines.insert(0, header)
    return "\n".join(lines)
def get_user_location(ip_address):
    """Gets user location information based on their IP address.

    Returns "City, Country" on success; falls back to the raw IP address
    when the lookup fails or the response lacks the expected fields.
    """
    try:
        data = requests.get(f"https://ipapi.co/{ip_address}/json/", timeout=10).json()
        # The list of available fields https://ipapi.co/api/#complete-location
        return ", ".join((data["city"], data["country_name"]))
    except (requests.exceptions.RequestException, KeyError, ValueError):
        return ip_address
def post_downloads_summary(days, channel, top_countries=5):
    """Posts downloads summary to Slack channel.

    Builds per-user and per-country download stats for the last `days`
    days and posts them as Slack blocks to `#<channel>`.
    """
    start_time = timezone.now() - datetime.timedelta(days=days)
    datasets = Dataset.processed_filtered_objects.filter(
        created_at__gt=start_time
    ).prefetch_related("datasetannotation_set")
    annotations = DatasetAnnotation.objects.filter(dataset__in=datasets)
    users_emails = set(dataset.email_address for dataset in datasets)
    locations = set()
    locations_cache = dict()
    for annotation in annotations:
        if "location" not in annotation.data:
            ip_address = annotation.data["ip"]
            if ip_address not in locations_cache:
                locations_cache[ip_address] = get_user_location(ip_address)
            # Save the locations permanently, since IP addresses can cycle over time.
            annotation.data["location"] = locations_cache[ip_address]
            annotation.save()
        locations.add(annotation.data["location"])
    downloads_per_country = Counter()
    downloads_total = 0
    new_users = []
    returning_users = []
    for user_email in users_emails:
        user_annotations = annotations.filter(dataset__email_address=user_email)
        user_downloads = user_annotations.count()
        downloads_total += user_downloads
        user_locations = set()
        for user_annotation in user_annotations:
            user_locations.add(user_annotation.data["location"])
            try:
                # Locations look like "City, Country"; index 1 is the country.
                country = user_annotation.data["location"].split(", ")[1]
                downloads_per_country.update({country: 1})
            except (IndexError, TypeError):
                pass
        user_locations = "; ".join(sorted(user_locations))
        user_data = (user_email, user_downloads, user_locations)
        # NOTE(review): this queryset is only used for its truthiness, which
        # evaluates the whole queryset; .exists() would be cheaper.
        is_returning_user = Dataset.processed_filtered_objects.filter(
            created_at__lt=start_time, email_address=user_email
        )
        if is_returning_user:
            returning_users.append(user_data)
        else:
            new_users.append(user_data)
    if downloads_total:
        locations_count = len(locations)
        users_emails_count = len(users_emails)
        fallback_text = (
            f"In the last {days} day{pluralize(days)}, {users_emails_count} "
            f"user{pluralize(users_emails_count)} downloaded {downloads_total} "
            f"dataset{pluralize(downloads_total)} from {locations_count} "
            f"location{pluralize(locations_count)}."
        )
    else:
        fallback_text = f"There were no downloads in the last {days} day{pluralize(days)}."
    # `fallback_text` doubles as the notification text; `blocks` is the rich layout.
    blocks = [
        {"type": "section", "text": {"type": "plain_text", "emoji": True, "text": fallback_text}}
    ]
    if new_users:
        blocks.append(
            {
                "type": "section",
                "text": {"type": "mrkdwn", "text": format_user_data("*New users*", new_users)},
            }
        )
    if returning_users:
        blocks.append(
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": format_user_data("*Returning users*", returning_users),
                },
            }
        )
    if top_countries and downloads_per_country:
        countries_count = downloads_per_country.most_common(top_countries)
        top_countries = min(top_countries, len(countries_count))
        lines = [f"*Top {top_countries} countr{pluralize(top_countries, 'y,ies')}*"]
        # Allowed overhead for 2 column sorting: downloads count, country.
        countries_count = sorted(countries_count, key=lambda cc: cc[0])
        countries_count = sorted(countries_count, key=lambda cc: cc[1], reverse=True)
        for country, count in countries_count:
            lines.append(f"{country}: {count} download{pluralize(count)}")
        blocks.append(
            {
                "type": "section",
                "text": {"type": "mrkdwn", "text": "\n".join(lines)},
            }
        )
    # Post to Slack.
    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json={
            "username": "EngagementBot",
            "icon_emoji": ":halal:",
            "channel": f"#{channel}",
            "text": fallback_text,
            "blocks": blocks,
        },
        headers={"Content-Type": "application/json"},
        timeout=10,
    )
<|code_end|>
| api/data_refinery_api/management/commands/post_downloads_summary.py
<|code_start|>import datetime
from collections import Counter
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template.defaultfilters import pluralize
from django.utils import timezone
import requests
from data_refinery_common.models import Dataset, DatasetAnnotation
class Command(BaseCommand):
    """Django management command that posts a downloads summary to Slack."""
    help = "Post downloads summary to Slack"
    def add_arguments(self, parser):
        parser.add_argument(
            "--channel",
            type=str,
            default="ccdl-general",
            help=("Optional parameter to choose the channel where the message will be posted."),
        )
        parser.add_argument(
            "--days",
            type=int,
            default=7,  # Default to a week.
            help=("Number of days in the past for which to build the stats."),
        )
        parser.add_argument(
            "--top-countries",
            type=int,
            default=5,
            help=("Number of countries to show in the per country downloads summary."),
        )
    def handle(self, *args, **options):
        # Delegate to the module-level function.
        post_downloads_summary(options["days"], options["channel"], options["top_countries"])
def format_user_data(header, data):
    """
    Formats user email, downloads count, location information sorted
    by downloads count.

    `data` is an iterable of (email, downloads, location) tuples; returns
    a newline-joined string with `header` as the first line.
    """
    # Allowed overhead for 2 column sorting: downloads count, email.
    # Sorting by email first means ties in the (stable) downloads sort
    # below keep alphabetical order.
    lines = sorted(data, key=lambda u: u[0].lower())
    lines = [
        f"{email.lower()} | {downloads} download{pluralize(downloads)} from {location}"
        for email, downloads, location in sorted(lines, key=lambda u: u[1], reverse=True)
    ]
    lines.insert(0, header)
    return "\n".join(lines)
def get_user_location(ip_address):
    """Gets user location information based on their IP address.

    Returns "City, Country" on success; falls back to the raw IP address
    when the lookup fails or the response lacks the expected fields.
    """
    try:
        data = requests.get(f"https://ipapi.co/{ip_address}/json/", timeout=10).json()
        # The list of available fields https://ipapi.co/api/#complete-location
        return ", ".join((data["city"], data["country_name"]))
    except (requests.exceptions.RequestException, KeyError, ValueError):
        return ip_address
def post_downloads_summary(days, channel, top_countries=5):
    """Posts downloads summary to Slack channel.

    Builds per-user and per-country download stats for the last `days`
    days and posts them as Slack blocks to `#<channel>`.
    """
    start_time = timezone.now() - datetime.timedelta(days=days)
    datasets = Dataset.processed_filtered_objects.filter(
        created_at__gt=start_time
    ).prefetch_related("datasetannotation_set")
    annotations = DatasetAnnotation.objects.filter(dataset__in=datasets)
    users_emails = set(dataset.email_address for dataset in datasets)
    locations = set()
    locations_cache = {}
    for annotation in annotations:
        if "location" not in annotation.data:
            ip_address = annotation.data["ip"]
            if ip_address not in locations_cache:
                locations_cache[ip_address] = get_user_location(ip_address)
            # Save the locations permanently, since IP addresses can cycle over time.
            annotation.data["location"] = locations_cache[ip_address]
            annotation.save()
        locations.add(annotation.data["location"])
    downloads_per_country = Counter()
    downloads_total = 0
    new_users = []
    returning_users = []
    for user_email in users_emails:
        user_annotations = annotations.filter(dataset__email_address=user_email)
        user_downloads = user_annotations.count()
        # Skip users with no annotated downloads in the window; they would
        # otherwise show up as "0 downloads" lines in the summary.
        if user_downloads == 0:
            continue
        downloads_total += user_downloads
        user_locations = set()
        for user_annotation in user_annotations:
            user_locations.add(user_annotation.data["location"])
            try:
                # Locations look like "City, Country"; index 1 is the country.
                country = user_annotation.data["location"].split(", ")[1]
                downloads_per_country.update({country: 1})
            except (IndexError, TypeError):
                pass
        user_locations = "; ".join(sorted(user_locations))
        user_data = (user_email, user_downloads, user_locations)
        is_returning_user = Dataset.processed_filtered_objects.filter(
            created_at__lt=start_time, email_address=user_email
        ).exists()
        if is_returning_user:
            returning_users.append(user_data)
        else:
            new_users.append(user_data)
    if downloads_total > 0:
        locations_count = len(locations)
        # Count only users that actually appear in the summary.
        users_count = len(new_users) + len(returning_users)
        fallback_text = (
            f"In the last {days} day{pluralize(days)}, {users_count} "
            f"user{pluralize(users_count)} downloaded {downloads_total} "
            f"dataset{pluralize(downloads_total)} from {locations_count} "
            f"location{pluralize(locations_count)}."
        )
    else:
        fallback_text = f"There were no downloads in the last {days} day{pluralize(days)}."
    # `fallback_text` doubles as the notification text; `blocks` is the rich layout.
    blocks = [
        {
            "type": "section",
            "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
        }
    ]
    if new_users:
        blocks.append(
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": format_user_data("*New users*", new_users),
                },
            }
        )
    if returning_users:
        blocks.append(
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": format_user_data("*Returning users*", returning_users),
                },
            }
        )
    if top_countries and downloads_per_country:
        countries_count = downloads_per_country.most_common(top_countries)
        top_countries = min(top_countries, len(countries_count))
        lines = [f"*Top {top_countries} countr{pluralize(top_countries, 'y,ies')}*"]
        # Allowed overhead for 2 column sorting: downloads count, country.
        countries_count = sorted(countries_count, key=lambda cc: cc[0])
        countries_count = sorted(countries_count, key=lambda cc: cc[1], reverse=True)
        for country, count in countries_count:
            lines.append(f"{country}: {count} download{pluralize(count)}")
        blocks.append(
            {
                "type": "section",
                "text": {"type": "mrkdwn", "text": "\n".join(lines)},
            }
        )
    # Post to Slack.
    requests.post(
        settings.ENGAGEMENTBOT_WEBHOOK,
        json={
            "username": "EngagementBot",
            "icon_emoji": ":halal:",
            "channel": f"#{channel}",
            "text": fallback_text,
            "blocks": blocks,
        },
        headers={"Content-Type": "application/json"},
        timeout=10,
    )
<|code_end|>
|
Support filtering on status in EPR
I couldn't really find a nice place in the WMS metadata spec to document the supported values for custom query parameters, so I crammed it into the WMS abstract.
| gen_ziektenplagenexotengroen.py
<|code_start|><|code_end|>
| gen_ziektenplagenexotengroen.py
<|code_start|>#!/usr/bin/env python
# Generates the ziektenplagenexotengroen mapfile.
# TODO rewrite using mappyfile, generalize.
from contextlib import contextmanager
# Current indentation depth of the emitted mapfile, managed by `block`.
indent: int = 0
def p(*strs: str):
    """Print an indented line: first token bare, remaining tokens quoted."""
    print(" " * indent, end="")
    print(strs[0], end="")
    if len(strs) > 1:
        print(" ", end="")
        print_quoted(strs[1:])
def q(*strs: str):
    """Print an indented line with every token quoted."""
    print(" " * indent, end="")
    print_quoted(strs)
def print_quoted(strs: tuple[str, ...]):
    # repr() emits each token in quoted form.
    print(*map(repr, strs))
@contextmanager
def block(typ: str):
    """Emit `typ` ... END, indenting everything printed inside the block."""
    p(typ)
    global indent
    indent += 1
    yield
    indent -= 1
    p("END")
# (layer name, symbol icon) pairs; one mapfile LAYER is generated per entry.
layers = [
    ("Eikenprocessierups aanwezig (Laag)", "caterpillar_blue"),
    ("Eikenprocessierups deels bestreden", "tree_orange"),
    ("Niet in beheergebied Gemeente Amsterdam", "flag_black"),
    ("Eikenprocessierups aanwezig (Urgent)", "caterpillar_red"),
    ("Gemeld", "speechbubble"),
    ("Eikenprocessierups bestreden", "tree_green"),
    ("Geen Eikenprocessierups aanwezig", "tree_black"),
    ("Niet bereikbaar voor bestrijding", "flag_red"),
    ("Eikenprocessierups aanwezig (Standaard)", "caterpillar_orange"),
]
print("# GENERATED FILE, DO NOT EDIT.\n\n")
print("# TEAM: Bor / Beeldschoon\n")
with block("MAP"):
    p("NAME", "ziektenplagenexotengroen")
    p("INCLUDE", "header.inc")
    p("DEBUG", 5)
    with block("WEB"):
        with block("METADATA"):
            q("ows_title", "Ziekte, plagen, exoten, groen")
            q(
                "ows_abstract",
                "Kaart met gegevens over ziekten, plagen en exoten in het groen in de gemeente Amsterdam",
            )
            q("wms_extent", "100000 450000 150000 500000")
    for name, icon in layers:
        with block("LAYER"):
            p("NAME", name)
            with block("PROJECTION"):
                q("init=epsg:28992")
            p("INCLUDE", "connection_dataservices.inc")
            p(
                "DATA",
                "geometrie FROM public.ziekte_plagen_exoten_groen_eikenprocessierups USING srid=28992 USING UNIQUE id",
            )
            p("TYPE POINT")
            with block("METADATA"):
                q("wfs_enable_request", "none")
                q("wms_title", name)
                q("wms_enable_request", "*")
                q("wms_abstract", "Eikenprocessierups Amsterdam")
                q("wms_srs", "EPSG:28992")
                q("wms_name", "Eikenprocessierups")
                q("wms_format", "image/png")
                q("wms_server_version", "1.3.0")
                q("wms_extent", "100000 450000 150000 500000")
            p("LABELITEM", "urgentie_status_kaartlaag")
            p("CLASSITEM", "urgentie_status_kaartlaag")
            with block("CLASS"):
                p("NAME", name)
                p("EXPRESSION", name)
                with block("STYLE"):
                    p("SYMBOL", icon)
                    p("SIZE", 20)
<|code_end|>
|
SPF support could be confusing
I note that the SPF RR type was discontinued in RFC 7208 (see https://tools.ietf.org/html/rfc7208#section-3.1). I tried searching for my own SPF records with the SPF type, but of course they were not found (they do show up as TXT records, which they are). This could lead to confusion as to whether the command should return SPF records (which are implemented as standard TXT records) or records of the SPF RR type (which has been discontinued).
For clarity, I would suggest that lexicon drop the SPF RR type. Alternately, you could allow listing the SPF RR type (possibly with a printed message for the user that notes that SPF records implemented as TXT records will not be returned), and disallow creation of SPF RRs (since current RFC specifies that it must be implemented by TXT record only).
| lexicon/__main__.py
<|code_start|>#!/usr/bin/env python
import argparse
import os
import importlib
import sys
from client import Client
import pkg_resources
#based off https://docs.python.org/2/howto/argparse.html
def BaseProviderParser():
    """Build the argparse parser shared by every provider subcommand."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("action", help="specify the action to take", default='list', choices=['create', 'list', 'update', 'delete'])
    parser.add_argument("domain", help="specify the domain, supports subdomains as well")
    # NOTE(review): the SPF RR type was discontinued by RFC 7208; SPF policies
    # are published as TXT records, so consider removing 'SPF' from choices.
    parser.add_argument("type", help="specify the entry type", default='TXT', choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SPF', 'SOA', 'TXT', 'SRV', 'LOC'])
    parser.add_argument("--name", help="specify the record name")
    parser.add_argument("--content", help="specify the record content")
    parser.add_argument("--ttl", help="specify the record time-to-live")
    parser.add_argument("--priority", help="specify the record priority")
    parser.add_argument("--identifier", help="specify the record for update or delete actions")
    return parser
def MainParser():
    """Build the top-level parser with one subcommand per provider module."""
    # Discover provider modules by listing the providers package directory.
    current_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'providers')
    providers = [os.path.splitext(f)[0] for f in os.listdir(current_filepath) if os.path.isfile(os.path.join(current_filepath, f))]
    providers = list(set(providers))
    providers.remove('base')
    providers.remove('__init__')
    providers = sorted(providers)
    parser = argparse.ArgumentParser(description='Create, Update, Delete, List DNS entries')
    parser.add_argument('--version', help="show the current version of lexicon", action='version', version='%(prog)s {0}'.format(pkg_resources.get_distribution("dns-lexicon").version))
    subparsers = parser.add_subparsers(dest='provider_name', help='specify the DNS provider to use')
    for provider in providers:
        # Each provider module contributes its own arguments via ProviderParser.
        provider_module = importlib.import_module('lexicon.providers.' + provider)
        provider_parser = getattr(provider_module, 'ProviderParser')
        subparser = subparsers.add_parser(provider, help='{0} provider'.format(provider), parents=[BaseProviderParser()])
        provider_parser(subparser)
    return parser
#dynamically determine all the providers available.
def main():
    """CLI entry point: parse arguments and execute the requested action."""
    parsed_args = MainParser().parse_args()
    # NOTE(review): debug print of all parsed args (which may include auth
    # values passed on the command line) — consider removing.
    print parsed_args
    client = Client(parsed_args.__dict__)
    client.execute()
if __name__ == '__main__':
    main()
<|code_end|>
lexicon/client.py
<|code_start|>import importlib
import os
import tldextract
#from providers import Example
class Client:
    """Ties the parsed CLI options to the chosen provider.

    Validates the options, strips any subdomain from the requested domain,
    merges in LEXICON_<PROVIDER>_* environment variables, and instantiates
    the provider class so `execute` can dispatch the requested action.
    """
    def __init__(self, options):
        #validate options
        self._validate(options)
        #process domain, strip subdomain
        domain_parts = tldextract.extract(options.get('domain'))
        options['domain'] = '{0}.{1}'.format(domain_parts.domain, domain_parts.suffix)
        self.action = options.get('action')
        self.provider_name = options.get('provider_name')
        self.options = options
        self._parse_env()
        # Dynamically load the provider module named on the command line.
        provider_module = importlib.import_module('lexicon.providers.' + self.provider_name)
        provider_class = getattr(provider_module, 'Provider')
        self.provider = provider_class(self.options)
    def execute(self):
        # Authenticate first, then dispatch to the provider method for the action.
        self.provider.authenticate()
        if self.action == 'create':
            return self.provider.create_record(self.options.get('type'), self.options.get('name'), self.options.get('content'))
        elif self.action == 'list':
            return self.provider.list_records(self.options.get('type'), self.options.get('name'), self.options.get('content'))
        elif self.action == 'update':
            return self.provider.update_record(self.options.get('identifier'), self.options.get('type'), self.options.get('name'), self.options.get('content'))
        elif self.action == 'delete':
            return self.provider.delete_record(self.options.get('identifier'), self.options.get('type'), self.options.get('name'), self.options.get('content'))
    def _parse_env(self):
        # make sure that auth parameters can be specified via environmental variables as well.
        # basically we map env variables for the chosen provider to the options dictionary (if a value isnt already provided)
        # LEXICON_CLOUDFLARE_TOKEN => options['auth_token']
        # LEXICON_CLOUDFLARE_USERNAME => options['auth_username']
        # LEXICON_CLOUDFLARE_PASSWORD => options['auth_password']
        env_prefix = 'LEXICON_{0}_'.format(self.provider_name.upper())
        for key in os.environ.keys():
            if key.startswith(env_prefix):
                auth_type = key[len(env_prefix):].lower()
                # NOTE(review): dict.get(key, default) returns the default only
                # when the key is absent, so a CLI option present with value
                # None is NOT overridden by the environment here — confirm intent.
                self.options['auth_{0}'.format(auth_type)] = self.options.get('auth_{0}'.format(auth_type), os.environ[key])
    def _validate(self, options):
        # Fail fast on the options every provider requires.
        if not options.get('provider_name'):
            raise AttributeError('provider_name')
        if not options.get('action'):
            raise AttributeError('action')
        if not options.get('domain'):
            raise AttributeError('domain')
        if not options.get('type'):
raise AttributeError('type')<|code_end|>
| lexicon/__main__.py
<|code_start|>#!/usr/bin/env python
import argparse
import os
import importlib
import sys
from client import Client
import pkg_resources
#based off https://docs.python.org/2/howto/argparse.html
def BaseProviderParser():
    """Build the argparse parser shared by every provider subcommand."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("action", help="specify the action to take", default='list', choices=['create', 'list', 'update', 'delete'])
    parser.add_argument("domain", help="specify the domain, supports subdomains as well")
    # 'SPF' is intentionally absent from the choices: RFC 7208 discontinued
    # the SPF RR type; SPF policies must be published as TXT records.
    parser.add_argument("type", help="specify the entry type", default='TXT', choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC'])
    parser.add_argument("--name", help="specify the record name")
    parser.add_argument("--content", help="specify the record content")
    parser.add_argument("--ttl", help="specify the record time-to-live")
    parser.add_argument("--priority", help="specify the record priority")
    parser.add_argument("--identifier", help="specify the record for update or delete actions")
    return parser
def MainParser():
    """Build the top-level parser with one subcommand per provider module."""
    # Discover provider modules by listing the providers package directory.
    current_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'providers')
    providers = [os.path.splitext(f)[0] for f in os.listdir(current_filepath) if os.path.isfile(os.path.join(current_filepath, f))]
    providers = list(set(providers))
    providers.remove('base')
    providers.remove('__init__')
    providers = sorted(providers)
    parser = argparse.ArgumentParser(description='Create, Update, Delete, List DNS entries')
    parser.add_argument('--version', help="show the current version of lexicon", action='version', version='%(prog)s {0}'.format(pkg_resources.get_distribution("dns-lexicon").version))
    subparsers = parser.add_subparsers(dest='provider_name', help='specify the DNS provider to use')
    for provider in providers:
        # Each provider module contributes its own arguments via ProviderParser.
        provider_module = importlib.import_module('lexicon.providers.' + provider)
        provider_parser = getattr(provider_module, 'ProviderParser')
        subparser = subparsers.add_parser(provider, help='{0} provider'.format(provider), parents=[BaseProviderParser()])
        provider_parser(subparser)
    return parser
#dynamically determine all the providers available.
def main():
    """CLI entry point: parse arguments and execute the requested action."""
    parsed_args = MainParser().parse_args()
    # NOTE(review): debug print of all parsed args (which may include auth
    # values passed on the command line) — consider removing.
    print parsed_args
    client = Client(parsed_args.__dict__)
    client.execute()
if __name__ == '__main__':
    main()
lexicon/client.py
<|code_start|>import importlib
import os
import tldextract
#from providers import Example
class Client:
    """Ties the parsed CLI options to the chosen provider.

    Validates the options, strips any subdomain from the requested domain,
    merges in LEXICON_<PROVIDER>_* environment variables, and instantiates
    the provider class so `execute` can dispatch the requested action.
    """
    def __init__(self, options):
        #validate options
        self._validate(options)
        #process domain, strip subdomain
        domain_parts = tldextract.extract(options.get('domain'))
        options['domain'] = '{0}.{1}'.format(domain_parts.domain, domain_parts.suffix)
        self.action = options.get('action')
        self.provider_name = options.get('provider_name')
        self.options = options
        self._parse_env()
        # Dynamically load the provider module named on the command line.
        provider_module = importlib.import_module('lexicon.providers.' + self.provider_name)
        provider_class = getattr(provider_module, 'Provider')
        self.provider = provider_class(self.options)
    def execute(self):
        # Authenticate first, then dispatch to the provider method for the action.
        self.provider.authenticate()
        if self.action == 'create':
            return self.provider.create_record(self.options.get('type'), self.options.get('name'), self.options.get('content'))
        elif self.action == 'list':
            return self.provider.list_records(self.options.get('type'), self.options.get('name'), self.options.get('content'))
        elif self.action == 'update':
            return self.provider.update_record(self.options.get('identifier'), self.options.get('type'), self.options.get('name'), self.options.get('content'))
        elif self.action == 'delete':
            return self.provider.delete_record(self.options.get('identifier'), self.options.get('type'), self.options.get('name'), self.options.get('content'))
    def _parse_env(self):
        # make sure that auth parameters can be specified via environmental variables as well.
        # basically we map env variables for the chosen provider to the options dictionary (if a value isnt already provided)
        # LEXICON_CLOUDFLARE_TOKEN => options['auth_token']
        # LEXICON_CLOUDFLARE_USERNAME => options['auth_username']
        # LEXICON_CLOUDFLARE_PASSWORD => options['auth_password']
        env_prefix = 'LEXICON_{0}_'.format(self.provider_name.upper())
        for key in os.environ.keys():
            if key.startswith(env_prefix):
                auth_type = key[len(env_prefix):].lower()
                # only assign auth_username/token/etc if its not already provided by CLI.
                if self.options.get('auth_{0}'.format(auth_type)) is None:
                    self.options['auth_{0}'.format(auth_type)] = os.environ[key]
    def _validate(self, options):
        # Fail fast on the options every provider requires.
        if not options.get('provider_name'):
            raise AttributeError('provider_name')
        if not options.get('action'):
            raise AttributeError('action')
        if not options.get('domain'):
            raise AttributeError('domain')
        if not options.get('type'):
raise AttributeError('type')<|code_end|>
|
DNSPod Provider
DNSPod is one of the biggest DNS provider in China.
Here I wrote a DNSPod adapter.
Also, I found that lexicon doesn't currently work, so I made some fixes to the environment-variable parsing.
Note: there is an --auth-id option that is not "standardized".
The DNSPod API authenticates with an id and a token. The id is issued per token and is not the login username, so I use --auth-id to avoid ambiguity.
However, if you prefer the standardized way, it is also fine to rename it to --auth-username, since DNSPod does not validate the real username for API access.
| lexicon/providers/dnspod.py
<|code_start|><|code_end|>
| lexicon/providers/dnspod.py
<|code_start|># -*- coding: utf-8 -*-
from base import Provider as BaseProvider
import requests
import json
def ProviderParser(subparser):
    # DNSPod authenticates with an API id + token pair, not a real username.
    subparser.add_argument("--auth-username", help="specify api id used to authenticate")
    subparser.add_argument("--auth-token", help="specify token used authenticate to DNS provider")
class Provider(BaseProvider):
    # DNSPod (dnsapi.cn) provider. Every request is a POST whose body carries
    # the `login_token` ("<api id>,<token>") and `format=json` fields.
    def __init__(self, options, provider_options={}):
        # NOTE(review): mutable default argument `provider_options={}` — shared
        # across calls; safe only as long as it is never mutated.
        super(Provider, self).__init__(options)
        self.domain_id = None
        self.api_endpoint = provider_options.get('api_endpoint') or 'https://dnsapi.cn'
    def authenticate(self):
        # DNSPod reports success with the string status code '1'.
        payload = self._post('/Domain.Info', {'domain':self.options['domain']})
        if payload['status']['code'] != '1':
            raise StandardError(payload['status']['message'])
        self.domain_id = payload['domain']['id']
    # Create record. If record already exists with the same content, do nothing'
    def create_record(self, type, name, content):
        record = {
            'domain_id': self.domain_id,
            'sub_domain': self._relative_name(name),
            'record_type': type,
            'record_line': '默认',
            'value': content
        }
        payload = self._post('/Record.Create', record)
        # '31' presumably means the record already exists (tolerated per the
        # comment above) — confirm against the DNSPod API docs.
        if payload['status']['code'] not in ['1', '31']:
            raise StandardError(payload['status']['message'])
        print 'create_record: {0}'.format(payload['status']['code'] == '1')
        return payload['status']['code'] == '1'
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, type=None, name=None, content=None):
        # NOTE(review): `filter` is assigned but never used.
        filter = {}
        payload = self._post('/Record.List', {'domain':self.options['domain']})
        print payload
        records = []
        for record in payload['records']:
            processed_record = {
                'type': record['type'],
                'name': self._full_name(record['name']),
                'ttl': record['ttl'],
                'content': record['value'],
                #this id is useless unless your doing record linking. Lets return the original record identifier.
                'id': record['id'] #
            }
            records.append(processed_record)
        # DNSPod's list endpoint has no server-side filters, so filter locally.
        if type:
            records = [record for record in records if record['type'] == type]
        if name:
            records = [record for record in records if record['name'] == self._full_name(name)]
        if content:
            records = [record for record in records if record['content'] == content]
        print 'list_records: {0}'.format(records)
        return records
    # Create or update a record.
    def update_record(self, identifier, type=None, name=None, content=None):
        data = {
            'domain_id': self.domain_id,
            'record_id': identifier,
            'sub_domain': self._relative_name(name),
            'record_type': type,
            'record_line': '默认',
            'value': content
        }
        print data
        payload = self._post('/Record.Modify', data)
        print payload
        if payload['status']['code'] != '1':
            raise StandardError(payload['status']['message'])
        print 'update_record: {0}'.format(True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, type=None, name=None, content=None):
        # Without an identifier the record must match the filters unambiguously.
        if not identifier:
            records = self.list_records(type, name, content)
            print records
            if len(records) == 1:
                identifier = records[0]['id']
            else:
                raise StandardError('Record identifier could not be found.')
        payload = self._post('/Record.Remove', {'domain_id': self.domain_id, 'record_id': identifier})
        if payload['status']['code'] != '1':
            raise StandardError(payload['status']['message'])
        # is always True at this point, if a non 200 response is returned an error is raised.
        print 'delete_record: {0}'.format(True)
        return True
    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        if data is None:
            data = {}
        # Credentials ride along in the POST body on every call.
        data['login_token'] = self.options['auth_username'] + ',' + self.options['auth_token']
        data['format'] = 'json'
        if query_params is None:
            query_params = {}
        default_headers = {}
        default_auth = None
        r = requests.request(action, self.api_endpoint + url, params=query_params,
                             data=data,
                             headers=default_headers,
                             auth=default_auth)
        r.raise_for_status() # if the request fails for any reason, throw an error.
        return r.json()
<|code_end|>
|
Case should be ignored when filtering records from digitalocean API
I noticed that digitalocean's HTTP API always returns `data` field of TXT records in lowercase, see [example](https://gist.github.com/jokester/e1152fd90ace2f71d767af1d96d802dc).
This can cause [lexicon](https://github.com/AnalogJ/lexicon/blob/b278cebf3f0cfba720b87b6ff9415969d524edc2/lexicon/providers/digitalocean.py#L58) to filter out the correct record and throw `Record identifier could not be found`.
Also [this error in issue #35](https://github.com/AnalogJ/lexicon/issues/35#issuecomment-213105616) seems to be related.
| lexicon/providers/digitalocean.py
<|code_start|>from base import Provider as BaseProvider
import requests
import json
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify token used authenticate to DNS provider")
class Provider(BaseProvider):
def __init__(self, options, provider_options={}):
super(Provider, self).__init__(options)
self.domain_id = None
self.api_endpoint = provider_options.get('api_endpoint') or 'https://api.digitalocean.com/v2'
def authenticate(self):
payload = self._get('/domains/{0}'.format(self.options['domain']))
self.domain_id = self.options['domain']
def create_record(self, type, name, content):
record = {
'type': type,
'name': self._relative_name(name),
'data': content,
}
if type == 'CNAME':
record['data'] = record['data'].rstrip('.') + '.' # make sure a the data is always a FQDN for CNAMe.
payload = self._post('/domains/{0}/records'.format(self.domain_id), record)
print 'create_record: {0}'.format(True)
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
filter = {}
payload = self._get('/domains/{0}/records'.format(self.domain_id))
records = []
for record in payload['domain_records']:
processed_record = {
'type': record['type'],
'name': "{0}.{1}".format(record['name'], self.domain_id),
'ttl': '',
'content': record['data'],
'id': record['id']
}
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'] == content]
print 'list_records: {0}'.format(records)
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
data = {}
if type:
data['type'] = type
if name:
data['name'] = self._relative_name(name)
if content:
data['data'] = content
payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
print 'update_record: {0}'.format(True)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
if not identifier:
records = self.list_records(type, name, content)
print records
if len(records) == 1:
identifier = records[0]['id']
else:
raise StandardError('Record identifier could not be found.')
payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))
# is always True at this point, if a non 200 response is returned an error is raised.
print 'delete_record: {0}'.format(True)
return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
default_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
}
r = requests.request(action, self.api_endpoint + url, params=query_params,
data=json.dumps(data),
headers=default_headers)
r.raise_for_status() # if the request fails for any reason, throw an error.
if action == 'DELETE':
return ''
else:
return r.json()
<|code_end|>
| lexicon/providers/digitalocean.py
<|code_start|>from base import Provider as BaseProvider
import requests
import json
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify token used authenticate to DNS provider")
class Provider(BaseProvider):
def __init__(self, options, provider_options={}):
super(Provider, self).__init__(options)
self.domain_id = None
self.api_endpoint = provider_options.get('api_endpoint') or 'https://api.digitalocean.com/v2'
def authenticate(self):
payload = self._get('/domains/{0}'.format(self.options['domain']))
self.domain_id = self.options['domain']
def create_record(self, type, name, content):
record = {
'type': type,
'name': self._relative_name(name),
'data': content,
}
if type == 'CNAME':
record['data'] = record['data'].rstrip('.') + '.' # make sure a the data is always a FQDN for CNAMe.
payload = self._post('/domains/{0}/records'.format(self.domain_id), record)
print 'create_record: {0}'.format(True)
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
filter = {}
payload = self._get('/domains/{0}/records'.format(self.domain_id))
records = []
for record in payload['domain_records']:
processed_record = {
'type': record['type'],
'name': "{0}.{1}".format(record['name'], self.domain_id),
'ttl': '',
'content': record['data'],
'id': record['id']
}
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'].lower() == content.lower()]
print 'list_records: {0}'.format(records)
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
data = {}
if type:
data['type'] = type
if name:
data['name'] = self._relative_name(name)
if content:
data['data'] = content
payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
print 'update_record: {0}'.format(True)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
if not identifier:
records = self.list_records(type, name, content)
print records
if len(records) == 1:
identifier = records[0]['id']
else:
raise StandardError('Record identifier could not be found.')
payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))
# is always True at this point, if a non 200 response is returned an error is raised.
print 'delete_record: {0}'.format(True)
return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
default_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
}
r = requests.request(action, self.api_endpoint + url, params=query_params,
data=json.dumps(data),
headers=default_headers)
r.raise_for_status() # if the request fails for any reason, throw an error.
if action == 'DELETE':
return ''
else:
return r.json()
<|code_end|>
|
CloudXNS Support
yet another big DNS service in China.
| lexicon/providers/cloudxns.py
<|code_start|><|code_end|>
| lexicon/providers/cloudxns.py
<|code_start|># -*- coding: utf-8 -*-
from base import Provider as BaseProvider
import requests
import json
import time
import hashlib
import urllib
def ProviderParser(subparser):
subparser.add_argument("--auth-username", help="specify API-KEY used authenticate to DNS provider")
subparser.add_argument("--auth-token", help="specify SECRET-KEY used authenticate to DNS provider")
class Provider(BaseProvider):
def __init__(self, options, provider_options={}):
super(Provider, self).__init__(options)
self.domain_id = None
self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.cloudxns.net/api2'
def authenticate(self):
payload = self._get('/domain')
for record in payload['data']:
if record['domain'] == self.options['domain']+'.':
self.domain_id = record['id']
break
if self.domain_id == None:
raise StandardError('No domain found')
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
record = {
'domain_id': self.domain_id,
'host': self._relative_name(name),
'value': content,
'type': type,
'line_id': 1
}
payload = self._post('/record', record)
print 'create_record: {0}'.format(True) # CloudXNS will return bad HTTP Status when error, will throw at r.raise_for_status() in _request()
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
filter = {}
payload = self._get('/record/' + self.domain_id, {'host_id':0, 'offset':0, 'row_num': 2000})
records = []
for record in payload['data']:
processed_record = {
'type': record['type'],
'name': self._full_name(record['host']),
'ttl': record['ttl'],
'content': record['value'],
#this id is useless unless your doing record linking. Lets return the original record identifier.
'id': record['record_id'] #
}
if processed_record['type'] == 'TXT':
processed_record['content'] = processed_record['content'].replace('"', '')
# CloudXNS will add quotes automaticly for TXT records, https://www.cloudxns.net/Support/detail/id/114.html
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'] == content]
print 'list_records: {0}'.format(records)
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
if not identifier:
records = self.list_records(name=name)
if len(records) == 1:
identifier = records[0]['id']
else:
raise StandardError('Record identifier could not be found.')
data = {
'domain_id': self.domain_id,
'host': self._relative_name(name),
'value': content,
'type': type
}
payload = self._put('/record/' + identifier, data)
print 'update_record: {0}'.format(True)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
if not identifier:
records = self.list_records(type, name, content)
if len(records) == 1:
identifier = records[0]['id']
else:
raise StandardError('Record identifier could not be found.')
payload = self._delete('/record/' + identifier + '/' + self.domain_id)
# is always True at this point, if a non 200 response is returned an error is raised.
print 'delete_record: {0}'.format(True)
return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
data['login_token'] = self.options['auth_username'] + ',' + self.options['auth_token']
data['format'] = 'json'
if query_params:
query_string = '?' + urllib.urlencode(query_params)
else:
query_string = ''
query_params = {}
if data:
data = json.dumps(data)
else:
data = ''
date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
default_headers = {
'API-KEY': self.options['auth_username'],
'API-REQUEST-DATE': date,
'API-HMAC': hashlib.md5(self.options['auth_username'] + self.api_endpoint + url + query_string + data + date + self.options['auth_token']).hexdigest(),
'API-FORMAT':'json'
}
default_auth = None
r = requests.request(action, self.api_endpoint + url, params=query_params,
data=data,
headers=default_headers,
auth=default_auth)
r.raise_for_status() # if the request fails for any reason, throw an error.
return r.json()
<|code_end|>
|
route53 provider and tests
AWS Route 53 provider for lexicon
Notes:
- haven't (yet?) added any command line args for auth - I always use env vars or credentials files, and the provider uses boto under the hood, so follows the same [credential lookup approach](http://boto3.readthedocs.io/en/latest/guide/configuration.html). Maybe we should have `--aws-access-key-id` etc, interested in other thoughts.
- skipped one of the tests since AWS record sets don't have IDs
- to create the vcr cassettes, I ran the tests using my own domain & AWS keys, and then did search & replace in the cassette files for the domain
- if one deletes the hosted zone (domain equivalent) in Route 53 within 12 hours, there's no charge, enabling free testing (in theory)
| lexicon/providers/route53.py
<|code_start|><|code_end|>
setup.py
<|code_start|>"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION')) as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests', 'tldextract'],
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.__main__:main',
],
},
extras_require={
'transip': ['transip']
},
test_suite='tests'
)
<|code_end|>
| lexicon/providers/route53.py
<|code_start|>"""Provide support to Lexicon for AWS Route 53 DNS changes."""
from base import Provider as BaseProvider
import boto3
import botocore
def ProviderParser(subparser):
"""Specify arguments for AWS Route 53 Lexicon Provider."""
subparser.add_argument("--auth-access-key", help="specify ACCESS_KEY used to authenticate")
subparser.add_argument("--auth-access-secret", help="specify ACCESS_SECRET used authenticate")
#TODO: these are only required for testing, we should figure out a way to remove them & update the integration tests
# to dynamically populate the auth credentials that are required.
subparser.add_argument("--auth-username", help="alternative way to specify ACCESS_KEY used to authenticate")
subparser.add_argument("--auth-token", help="alternative way to specify ACCESS_SECRET used authenticate")
class RecordSetPaginator(object):
"""Paginate through complete list of record sets."""
def __init__(self, r53_client, hosted_zone_id, max_items=None):
"""Initialize paginator."""
self.r53_client = r53_client
self.hosted_zone_id = hosted_zone_id
self.max_items = max_items
def get_record_sets(self, **kwargs):
"""Retrieve a page from API."""
return self.r53_client.list_resource_record_sets(**kwargs)
def get_base_kwargs(self):
"""Get base kwargs for API call."""
kwargs = {
'HostedZoneId': self.hosted_zone_id
}
if self.max_items is not None:
kwargs.update({
'MaxItems': str(self.max_items)
})
return kwargs
def all_record_sets(self):
"""Generator to loop through current record set.
Call next page if it exists.
"""
is_truncated = True
start_record_name = None
start_record_type = None
kwargs = self.get_base_kwargs()
while is_truncated:
if start_record_name is not None:
kwargs.update({
'StartRecordName': start_record_name,
'StartRecordType': start_record_type
})
result = self.get_record_sets(**kwargs)
for record_set in result.get('ResourceRecordSets', []):
yield record_set
is_truncated = result.get('IsTruncated', False)
start_record_name = result.get('NextRecordName', None)
start_record_type = result.get('NextRecordType', None)
class Provider(BaseProvider):
"""Provide AWS Route 53 implementation of Lexicon Provider interface."""
def __init__(self, options, provider_options={}):
"""Initialize AWS Route 53 DNS provider."""
super(Provider, self).__init__(options)
self.domain_id = None
# instantiate the client
self.r53_client = boto3.client(
'route53',
aws_access_key_id=self.options.get('auth_access_key', self.options.get('auth_username')),
aws_secret_access_key=self.options.get('auth_access_secret', self.options.get('auth_token'))
)
def authenticate(self):
"""Determine the hosted zone id for the domain."""
try:
hosted_zones = self.r53_client.list_hosted_zones_by_name()[
'HostedZones'
]
hosted_zone = next(
hz for hz in hosted_zones
if hz['Name'] == '{}.'.format(self.options['domain'])
)
self.domain_id = hosted_zone['Id']
except StopIteration:
raise StandardError('No domain found')
def _change_record_sets(self, action, type, name, content):
ttl = self.options.get('ttl')
value = '"{}"'.format(content) if type in ['TXT', 'SPF'] else content
try:
self.r53_client.change_resource_record_sets(
HostedZoneId=self.domain_id,
ChangeBatch={
'Comment': '{} using lexicon Route 53 provider'.format(
action
),
'Changes': [
{
'Action': action,
'ResourceRecordSet': {
'Name': self._fqdn_name(name),
'Type': type,
'TTL': ttl if ttl is not None else 300,
'ResourceRecords': [
{
'Value': value
}
]
}
}
]
}
)
return True
except botocore.exceptions.ClientError as e:
print e.message
def create_record(self, type, name, content):
"""Create a record in the hosted zone."""
return self._change_record_sets('CREATE', type, name, content)
def update_record(self, identifier=None, type=None, name=None, content=None):
"""Update a record from the hosted zone."""
return self._change_record_sets('UPSERT', type, name, content)
def delete_record(self, identifier=None, type=None, name=None, content=None):
"""Delete a record from the hosted zone."""
return self._change_record_sets('DELETE', type, name, content)
def _format_content(self, type, content):
return content[1:-1] if type in ['TXT', 'SPF'] else content
def list_records(self, type=None, name=None, content=None):
"""List all records for the hosted zone."""
records = []
paginator = RecordSetPaginator(self.r53_client, self.domain_id)
for record in paginator.all_record_sets():
if type is not None and record['Type'] != type:
continue
if name is not None and record['Name'] != self._fqdn_name(name):
continue
if record.get('AliasTarget', None) is not None:
record_content = [record['AliasTarget'].get('DNSName', None)]
if record.get('ResourceRecords', None) is not None:
record_content = [self._format_content(record['Type'], value['Value']) for value
in record['ResourceRecords']]
if content is not None and content not in record_content:
continue
print record
records.append({
'type': record['Type'],
'name': self._full_name(record['Name']),
'ttl': record.get('TTL', None),
'content': record_content[0] if len(record_content) == 1 else record_content,
})
print 'list_records: {0}'.format(records)
return records
<|code_end|>
setup.py
<|code_start|>"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION')) as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests', 'tldextract'],
extras_require={
'route53': ['boto3'],
'transip': ['transip']
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.__main__:main',
],
},
test_suite='tests'
)
<|code_end|>
|
Gandi Records not picking up TTL
I'm trying to renew Lets Encrypt Certs using dehydrated, using lexicon to handle DNS modifications on a Gandi domain. New challenge TXT records are being created with a TTL of 3 hours. I have tried setting --ttl manually but they're always created with the default 3 hour TTL:
`lexicon gandi create platformservices.io TXT --name="testsubdomain.platformservices.io" --content="TEST" --ttl="120"`
Looks to me like the create_record method is missing the ttl property:
https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/gandi.py#L93
| lexicon/providers/gandi.py
<|code_start|>"""Provide support to Lexicon for Gandi DNS changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
The Gandi API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
import xmlrpclib
from lexicon.providers.base import Provider as BaseProvider
def ProviderParser(subparser):
"""Specify arguments for Gandi Lexicon Provider."""
subparser.add_argument('--auth-token', help="specify Gandi API key")
class Provider(BaseProvider):
"""Provide Gandi DNS API implementation of Lexicon Provider interface.
The class will use the following environment variables to configure
it instance. For more information, read the Lexicon documentation.
- LEXICON_GANDI_API_ENDPOINT - the Gandi API endpoint to use
The default is the production URL https://rpc.gandi.net/xmlrpc/.
Set this environment variable to the OT&E URL for testing.
"""
def __init__(self, options, provider_options=None):
"""Initialize Gandi DNS provider."""
super(Provider, self).__init__(options)
if provider_options is None:
provider_options = {}
api_endpoint = provider_options.get('api_endpoint') or 'https://rpc.gandi.net/xmlrpc/'
self.apikey = self.options['auth_token']
self.api = xmlrpclib.ServerProxy(api_endpoint)
# self.domain_id is required by test suite
self.domain_id = None
self.zone_id = None
self.domain = self.options['domain'].lower()
# Authenicate against provider,
# Make any requests required to get the domain's id for this provider,
# so it can be used in subsequent calls. Should throw an error if
# authentication fails for any reason, or if the domain does not exist.
def authenticate(self):
"""Determine the current domain and zone IDs for the domain."""
try:
payload = self.api.domain.info(self.apikey, self.domain)
self.domain_id = payload['id']
self.zone_id = payload['zone_id']
except xmlrpclib.Fault as err:
raise StandardError("Failed to authenticate: '{0}'".format(err))
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
"""Creates a record for the domain in a new Gandi zone."""
version = None
ret = False
name = self._canonicalize_name(name)
# This isn't quite "do nothing" if the record already exists.
# In this case, no new record will be created, but a new zone version
# will be created and set.
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
self.api.domain.zone.record.add(self.apikey, self.zone_id, version,
{'type': type.upper(),
'name': name,
'value': content})
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "create_record: {0}".format(ret)
return ret
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
"""List all record for the domain in the active Gandi zone."""
opts = {}
if type is not None:
opts['type'] = type.upper()
if name is not None:
opts['name'] = self._canonicalize_name(name)
if content is not None:
opts['value'] = self._txt_encode(content) if opts.get('type', '') == 'TXT' else content
records = []
payload = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
for record in payload:
processed_record = {
'type': record['type'],
'name': self._fqdn(record['name']),
'ttl': record['ttl'],
'content': record['value'],
'id': record['id']
}
# Gandi will add quotes to all TXT record strings
if processed_record['type'] == 'TXT':
processed_record['content'] = self._txt_decode(processed_record['content'])
records.append(processed_record)
print "list_records: {0}".format(records)
return records
# Update a record. Identifier must be specified.
def update_record(self, identifier, type=None, name=None, content=None):
"""Updates the specified record in a new Gandi zone."""
identifier = int(identifier)
version = None
# Gandi doesn't allow you to edit records on the active zone file.
# Gandi also doesn't persist zone record identifiers when creating
# a new zone file. To update by identifier, we lookup the record
# by identifier, then use the record fields to find the record in
# the newly created zone.
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, {'id': identifier})
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, version, rec)
if len(records) != 1:
raise GandiInternalError("expected one record")
if type is not None:
rec['type'] = type.upper()
if name is not None:
rec['name'] = self._canonicalize_name(name)
if content is not None:
rec['value'] = self._txt_encode(content) if rec['type'] == 'TXT' else content
records = self.api.domain.zone.record.update(self.apikey,
self.zone_id,
version,
{'id': records[0]['id']},
rec)
if len(records) != 1:
raise GandiInternalError("expected one updated record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "update_record: {0}".format(ret)
return ret
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def delete_record(self, identifier=None, type=None, name=None, content=None):
"""Removes the specified record in a new Gandi zone."""
version = None
ret = False
opts = {}
if identifier is not None:
opts['id'] = identifier
else:
opts['type'] = type.upper()
opts['name'] = self._canonicalize_name(name)
opts["value"] = self._txt_encode(content) if opts['type'] == 'TXT' else content
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
cnt = self.api.domain.zone.record.delete(self.apikey, self.zone_id, version, rec)
if cnt != 1:
raise GandiInternalError("expected one deleted record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "delete_record: {0}".format(ret)
return ret
def _fqdn(self, name):
if not name.endswith('.') and not name.endswith('.{0}'.format(self.domain)):
name += '.{0}'.format(self.domain)
return name
def _canonicalize_name(self, name):
name = name.lower()
if name.endswith('.{0}.'.format(self.domain)):
name = name[:-1]
if name.endswith('.{0}'.format(self.domain)):
name = name[:-(len(self.domain) + 1)]
return name
@staticmethod
def _txt_encode(val):
return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])
@staticmethod
def _txt_decode(val):
if len(val) > 1 and val[0:1] == '"':
val = val[1:-1].replace('" "', '').replace('\\"', '"').replace('\\\\', '\\')
return val
# This exception is for cleaner handling of internal errors
# within the Gandi provider codebase
class GandiInternalError(Exception):
"""Internal exception handling class for Gandi management errors"""
pass
<|code_end|>
| lexicon/providers/gandi.py
<|code_start|>"""Provide support to Lexicon for Gandi DNS changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
The Gandi API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
import xmlrpclib
from lexicon.providers.base import Provider as BaseProvider
def ProviderParser(subparser):
"""Specify arguments for Gandi Lexicon Provider."""
subparser.add_argument('--auth-token', help="specify Gandi API key")
class Provider(BaseProvider):
"""Provide Gandi DNS API implementation of Lexicon Provider interface.
The class will use the following environment variables to configure
it instance. For more information, read the Lexicon documentation.
- LEXICON_GANDI_API_ENDPOINT - the Gandi API endpoint to use
The default is the production URL https://rpc.gandi.net/xmlrpc/.
Set this environment variable to the OT&E URL for testing.
"""
def __init__(self, options, provider_options=None):
"""Initialize Gandi DNS provider."""
super(Provider, self).__init__(options)
if provider_options is None:
provider_options = {}
api_endpoint = provider_options.get('api_endpoint') or 'https://rpc.gandi.net/xmlrpc/'
self.apikey = self.options['auth_token']
self.api = xmlrpclib.ServerProxy(api_endpoint)
# self.domain_id is required by test suite
self.domain_id = None
self.zone_id = None
self.domain = self.options['domain'].lower()
# Authenicate against provider,
# Make any requests required to get the domain's id for this provider,
# so it can be used in subsequent calls. Should throw an error if
# authentication fails for any reason, or if the domain does not exist.
def authenticate(self):
"""Determine the current domain and zone IDs for the domain."""
try:
payload = self.api.domain.info(self.apikey, self.domain)
self.domain_id = payload['id']
self.zone_id = payload['zone_id']
except xmlrpclib.Fault as err:
raise StandardError("Failed to authenticate: '{0}'".format(err))
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
"""Creates a record for the domain in a new Gandi zone."""
version = None
ret = False
name = self._canonicalize_name(name)
# This isn't quite "do nothing" if the record already exists.
# In this case, no new record will be created, but a new zone version
# will be created and set.
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
self.api.domain.zone.record.add(self.apikey, self.zone_id, version,
{'type': type.upper(),
'name': name,
'value': content,
'ttl': self.options.get('ttl',self.default_ttl)
})
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "create_record: {0}".format(ret)
return ret
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
"""List all record for the domain in the active Gandi zone."""
opts = {}
if type is not None:
opts['type'] = type.upper()
if name is not None:
opts['name'] = self._canonicalize_name(name)
if content is not None:
opts['value'] = self._txt_encode(content) if opts.get('type', '') == 'TXT' else content
records = []
payload = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
for record in payload:
processed_record = {
'type': record['type'],
'name': self._fqdn(record['name']),
'ttl': record['ttl'],
'content': record['value'],
'id': record['id']
}
# Gandi will add quotes to all TXT record strings
if processed_record['type'] == 'TXT':
processed_record['content'] = self._txt_decode(processed_record['content'])
records.append(processed_record)
print "list_records: {0}".format(records)
return records
# Update a record. Identifier must be specified.
def update_record(self, identifier, type=None, name=None, content=None):
"""Updates the specified record in a new Gandi zone."""
identifier = int(identifier)
version = None
# Gandi doesn't allow you to edit records on the active zone file.
# Gandi also doesn't persist zone record identifiers when creating
# a new zone file. To update by identifier, we lookup the record
# by identifier, then use the record fields to find the record in
# the newly created zone.
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, {'id': identifier})
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, version, rec)
if len(records) != 1:
raise GandiInternalError("expected one record")
if type is not None:
rec['type'] = type.upper()
if name is not None:
rec['name'] = self._canonicalize_name(name)
if content is not None:
rec['value'] = self._txt_encode(content) if rec['type'] == 'TXT' else content
records = self.api.domain.zone.record.update(self.apikey,
self.zone_id,
version,
{'id': records[0]['id']},
rec)
if len(records) != 1:
raise GandiInternalError("expected one updated record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "update_record: {0}".format(ret)
return ret
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def delete_record(self, identifier=None, type=None, name=None, content=None):
"""Removes the specified record in a new Gandi zone."""
version = None
ret = False
opts = {}
if identifier is not None:
opts['id'] = identifier
else:
opts['type'] = type.upper()
opts['name'] = self._canonicalize_name(name)
opts["value"] = self._txt_encode(content) if opts['type'] == 'TXT' else content
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
cnt = self.api.domain.zone.record.delete(self.apikey, self.zone_id, version, rec)
if cnt != 1:
raise GandiInternalError("expected one deleted record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "delete_record: {0}".format(ret)
return ret
def _fqdn(self, name):
if not name.endswith('.') and not name.endswith('.{0}'.format(self.domain)):
name += '.{0}'.format(self.domain)
return name
def _canonicalize_name(self, name):
name = name.lower()
if name.endswith('.{0}.'.format(self.domain)):
name = name[:-1]
if name.endswith('.{0}'.format(self.domain)):
name = name[:-(len(self.domain) + 1)]
return name
@staticmethod
def _txt_encode(val):
return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])
@staticmethod
def _txt_decode(val):
if len(val) > 1 and val[0:1] == '"':
val = val[1:-1].replace('" "', '').replace('\\"', '"').replace('\\\\', '\\')
return val
# This exception is for cleaner handling of internal errors
# within the Gandi provider codebase
class GandiInternalError(Exception):
"""Internal exception handling class for Gandi management errors"""
pass
<|code_end|>
|
Gandi Records not picking up TTL
I'm trying to renew Lets Encrypt Certs using dehydrated, using lexicon to handle DNS modifications on a Gandi domain. New challenge TXT records are being created with a TTL of 3 hours. I have tried setting --ttl manually but they're always created with the default 3 hour TTL:
`lexicon gandi create platformservices.io TXT --name="testsubdomain.platformservices.io" --content="TEST" --ttl="120"`
Looks to me like the create_record method is missing the ttl property:
https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/gandi.py#L93
| lexicon/providers/gandi.py
<|code_start|>"""Provide support to Lexicon for Gandi DNS changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
The Gandi API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
import xmlrpclib
from lexicon.providers.base import Provider as BaseProvider
def ProviderParser(subparser):
"""Specify arguments for Gandi Lexicon Provider."""
subparser.add_argument('--auth-token', help="specify Gandi API key")
class Provider(BaseProvider):
"""Provide Gandi DNS API implementation of Lexicon Provider interface.
The class will use the following environment variables to configure
it instance. For more information, read the Lexicon documentation.
- LEXICON_GANDI_API_ENDPOINT - the Gandi API endpoint to use
The default is the production URL https://rpc.gandi.net/xmlrpc/.
Set this environment variable to the OT&E URL for testing.
"""
def __init__(self, options, provider_options=None):
"""Initialize Gandi DNS provider."""
super(Provider, self).__init__(options)
if provider_options is None:
provider_options = {}
api_endpoint = provider_options.get('api_endpoint') or 'https://rpc.gandi.net/xmlrpc/'
self.apikey = self.options['auth_token']
self.api = xmlrpclib.ServerProxy(api_endpoint)
# self.domain_id is required by test suite
self.domain_id = None
self.zone_id = None
self.domain = self.options['domain'].lower()
# Authenicate against provider,
# Make any requests required to get the domain's id for this provider,
# so it can be used in subsequent calls. Should throw an error if
# authentication fails for any reason, or if the domain does not exist.
def authenticate(self):
"""Determine the current domain and zone IDs for the domain."""
try:
payload = self.api.domain.info(self.apikey, self.domain)
self.domain_id = payload['id']
self.zone_id = payload['zone_id']
except xmlrpclib.Fault as err:
raise StandardError("Failed to authenticate: '{0}'".format(err))
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
"""Creates a record for the domain in a new Gandi zone."""
version = None
ret = False
name = self._canonicalize_name(name)
# This isn't quite "do nothing" if the record already exists.
# In this case, no new record will be created, but a new zone version
# will be created and set.
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
self.api.domain.zone.record.add(self.apikey, self.zone_id, version,
{'type': type.upper(),
'name': name,
'value': content,
'ttl': self.options.get('ttl',self.default_ttl)
})
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "create_record: {0}".format(ret)
return ret
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
"""List all record for the domain in the active Gandi zone."""
opts = {}
if type is not None:
opts['type'] = type.upper()
if name is not None:
opts['name'] = self._canonicalize_name(name)
if content is not None:
opts['value'] = self._txt_encode(content) if opts.get('type', '') == 'TXT' else content
records = []
payload = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
for record in payload:
processed_record = {
'type': record['type'],
'name': self._fqdn(record['name']),
'ttl': record['ttl'],
'content': record['value'],
'id': record['id']
}
# Gandi will add quotes to all TXT record strings
if processed_record['type'] == 'TXT':
processed_record['content'] = self._txt_decode(processed_record['content'])
records.append(processed_record)
print "list_records: {0}".format(records)
return records
# Update a record. Identifier must be specified.
def update_record(self, identifier, type=None, name=None, content=None):
"""Updates the specified record in a new Gandi zone."""
identifier = int(identifier)
version = None
# Gandi doesn't allow you to edit records on the active zone file.
# Gandi also doesn't persist zone record identifiers when creating
# a new zone file. To update by identifier, we lookup the record
# by identifier, then use the record fields to find the record in
# the newly created zone.
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, {'id': identifier})
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, version, rec)
if len(records) != 1:
raise GandiInternalError("expected one record")
if type is not None:
rec['type'] = type.upper()
if name is not None:
rec['name'] = self._canonicalize_name(name)
if content is not None:
rec['value'] = self._txt_encode(content) if rec['type'] == 'TXT' else content
records = self.api.domain.zone.record.update(self.apikey,
self.zone_id,
version,
{'id': records[0]['id']},
rec)
if len(records) != 1:
raise GandiInternalError("expected one updated record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "update_record: {0}".format(ret)
return ret
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def delete_record(self, identifier=None, type=None, name=None, content=None):
"""Removes the specified record in a new Gandi zone."""
version = None
ret = False
opts = {}
if identifier is not None:
opts['id'] = identifier
else:
opts['type'] = type.upper()
opts['name'] = self._canonicalize_name(name)
opts["value"] = self._txt_encode(content) if opts['type'] == 'TXT' else content
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
cnt = self.api.domain.zone.record.delete(self.apikey, self.zone_id, version, rec)
if cnt != 1:
raise GandiInternalError("expected one deleted record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "delete_record: {0}".format(ret)
return ret
def _fqdn(self, name):
if not name.endswith('.') and not name.endswith('.{0}'.format(self.domain)):
name += '.{0}'.format(self.domain)
return name
def _canonicalize_name(self, name):
name = name.lower()
if name.endswith('.{0}.'.format(self.domain)):
name = name[:-1]
if name.endswith('.{0}'.format(self.domain)):
name = name[:-(len(self.domain) + 1)]
return name
@staticmethod
def _txt_encode(val):
return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])
@staticmethod
def _txt_decode(val):
if len(val) > 1 and val[0:1] == '"':
val = val[1:-1].replace('" "', '').replace('\\"', '"').replace('\\\\', '\\')
return val
# This exception is for cleaner handling of internal errors
# within the Gandi provider codebase
class GandiInternalError(Exception):
"""Internal exception handling class for Gandi management errors"""
pass
<|code_end|>
| lexicon/providers/gandi.py
<|code_start|>"""Provide support to Lexicon for Gandi DNS changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
The Gandi API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
import xmlrpclib
from lexicon.providers.base import Provider as BaseProvider
def ProviderParser(subparser):
"""Specify arguments for Gandi Lexicon Provider."""
subparser.add_argument('--auth-token', help="specify Gandi API key")
class Provider(BaseProvider):
"""Provide Gandi DNS API implementation of Lexicon Provider interface.
The class will use the following environment variables to configure
it instance. For more information, read the Lexicon documentation.
- LEXICON_GANDI_API_ENDPOINT - the Gandi API endpoint to use
The default is the production URL https://rpc.gandi.net/xmlrpc/.
Set this environment variable to the OT&E URL for testing.
"""
def __init__(self, options, provider_options=None):
"""Initialize Gandi DNS provider."""
super(Provider, self).__init__(options)
if provider_options is None:
provider_options = {}
api_endpoint = provider_options.get('api_endpoint') or 'https://rpc.gandi.net/xmlrpc/'
self.apikey = self.options['auth_token']
self.api = xmlrpclib.ServerProxy(api_endpoint)
# self.domain_id is required by test suite
self.domain_id = None
self.zone_id = None
self.domain = self.options['domain'].lower()
# Authenicate against provider,
# Make any requests required to get the domain's id for this provider,
# so it can be used in subsequent calls. Should throw an error if
# authentication fails for any reason, or if the domain does not exist.
def authenticate(self):
"""Determine the current domain and zone IDs for the domain."""
try:
payload = self.api.domain.info(self.apikey, self.domain)
self.domain_id = payload['id']
self.zone_id = payload['zone_id']
except xmlrpclib.Fault as err:
raise StandardError("Failed to authenticate: '{0}'".format(err))
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
"""Creates a record for the domain in a new Gandi zone."""
version = None
ret = False
name = self._canonicalize_name(name)
# This isn't quite "do nothing" if the record already exists.
# In this case, no new record will be created, but a new zone version
# will be created and set.
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
self.api.domain.zone.record.add(self.apikey, self.zone_id, version,
{'type': type.upper(),
'name': name,
'value': content,
'ttl': self.options.get('ttl') or self.default_ttl
})
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "create_record: {0}".format(ret)
return ret
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
"""List all record for the domain in the active Gandi zone."""
opts = {}
if type is not None:
opts['type'] = type.upper()
if name is not None:
opts['name'] = self._canonicalize_name(name)
if content is not None:
opts['value'] = self._txt_encode(content) if opts.get('type', '') == 'TXT' else content
records = []
payload = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
for record in payload:
processed_record = {
'type': record['type'],
'name': self._fqdn(record['name']),
'ttl': record['ttl'],
'content': record['value'],
'id': record['id']
}
# Gandi will add quotes to all TXT record strings
if processed_record['type'] == 'TXT':
processed_record['content'] = self._txt_decode(processed_record['content'])
records.append(processed_record)
print "list_records: {0}".format(records)
return records
# Update a record. Identifier must be specified.
def update_record(self, identifier, type=None, name=None, content=None):
"""Updates the specified record in a new Gandi zone."""
identifier = int(identifier)
version = None
# Gandi doesn't allow you to edit records on the active zone file.
# Gandi also doesn't persist zone record identifiers when creating
# a new zone file. To update by identifier, we lookup the record
# by identifier, then use the record fields to find the record in
# the newly created zone.
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, {'id': identifier})
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, version, rec)
if len(records) != 1:
raise GandiInternalError("expected one record")
if type is not None:
rec['type'] = type.upper()
if name is not None:
rec['name'] = self._canonicalize_name(name)
if content is not None:
rec['value'] = self._txt_encode(content) if rec['type'] == 'TXT' else content
records = self.api.domain.zone.record.update(self.apikey,
self.zone_id,
version,
{'id': records[0]['id']},
rec)
if len(records) != 1:
raise GandiInternalError("expected one updated record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "update_record: {0}".format(ret)
return ret
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def delete_record(self, identifier=None, type=None, name=None, content=None):
"""Removes the specified record in a new Gandi zone."""
version = None
ret = False
opts = {}
if identifier is not None:
opts['id'] = identifier
else:
opts['type'] = type.upper()
opts['name'] = self._canonicalize_name(name)
opts["value"] = self._txt_encode(content) if opts['type'] == 'TXT' else content
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
cnt = self.api.domain.zone.record.delete(self.apikey, self.zone_id, version, rec)
if cnt != 1:
raise GandiInternalError("expected one deleted record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
print "delete_record: {0}".format(ret)
return ret
def _fqdn(self, name):
if not name.endswith('.') and not name.endswith('.{0}'.format(self.domain)):
name += '.{0}'.format(self.domain)
return name
def _canonicalize_name(self, name):
name = name.lower()
if name.endswith('.{0}.'.format(self.domain)):
name = name[:-1]
if name.endswith('.{0}'.format(self.domain)):
name = name[:-(len(self.domain) + 1)]
return name
@staticmethod
def _txt_encode(val):
return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])
@staticmethod
def _txt_decode(val):
if len(val) > 1 and val[0:1] == '"':
val = val[1:-1].replace('" "', '').replace('\\"', '"').replace('\\\\', '\\')
return val
# This exception is for cleaner handling of internal errors
# within the Gandi provider codebase
class GandiInternalError(Exception):
"""Internal exception handling class for Gandi management errors"""
pass
<|code_end|>
|
Yandex PDD provider
Hi
I create support Yandex PDD as a DNS provider.
But, I skip test because I haven't ideas to implementation it.
@AnalogJ please merge if it possible.
| lexicon/providers/yandex.py
<|code_start|><|code_end|>
| lexicon/providers/yandex.py
<|code_start|>from __future__ import print_function
from __future__ import absolute_import
from .base import Provider as BaseProvider
import requests
import json
__author__ = 'Aliaksandr Kharkevich'
__license__ = 'MIT'
__contact__ = 'https://github.com/kharkevich'
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify PDD token (https://tech.yandex.com/domain/doc/concepts/access-docpage/)")
class Provider(BaseProvider):
def __init__(self, options, provider_options={}):
super(Provider, self).__init__(options)
self.domain_id = None
self.api_endpoint = provider_options.get('api_endpoint') or 'https://pddimp.yandex.ru/api2/admin/dns'
def authenticate(self):
payload = self._get('/list?domain={0}'.format(self.options['domain']))
if payload['success'] != "ok":
raise Exception('No domain found')
self.domain_id = self.options['domain']
def create_record(self, type, name, content):
if (type == 'CNAME') or (type == 'MX') or (type == 'NS'):
content = content.rstrip('.') + '.' # make sure a the data is always a FQDN for CNAMe.
querystring = 'domain={0}&type={1}&subdomain={2}&content={3}'.format(self.domain_id, type, self._relative_name(name), content)
if self.options.get('ttl'):
querystring += "&ttl={0}".format(self.options.get('ttl'))
payload = self._post('/add', {},querystring)
return self._check_exitcode(payload, 'create_record')
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
url = '/list?domain={0}'.format(self.domain_id)
records = []
payload = {}
next = url
while next is not None:
payload = self._get(next)
if 'links' in payload \
and 'pages' in payload['links'] \
and 'next' in payload['links']['pages']:
next = payload['links']['pages']['next']
else:
next = None
for record in payload['records']:
processed_record = {
'type': record['type'],
'name': "{0}.{1}".format(record['subdomain'], self.domain_id),
'ttl': record['ttl'],
'content': record['content'],
'id': record['record_id']
}
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'].lower() == content.lower()]
print('list_records: {0}'.format(records))
return records
# Just update existing record. Domain ID (domain) and Identifier (record_id) is mandatory
def update_record(self, identifier, type=None, name=None, content=None):
if not identifier:
print('Domain ID (domain) and Identifier (record_id) is mandatory parameters for this case')
return False
data = ''
if type:
data += '&type={0}'.format(type)
if name:
data += '&subdomain={0}'.format(self._relative_name(name))
if content:
data += '&content={0}'.format(content)
payload = self._post('/edit', {}, 'domain={0}&record_id={1}'.format(self.domain_id, identifier) + data)
return self._check_exitcode(payload, 'update_record')
# Delete an existing record.
# If record does not exist (I'll hope), do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
if not identifier:
records = self.list_records(type, name, content)
print(records)
if len(records) == 1:
identifier = records[0]['id']
else:
raise Exception('Record identifier could not be found.')
payload = self._post('/del', {}, 'domain={0}&record_id={1}'.format(self.domain_id, identifier))
return self._check_exitcode(payload, 'delete_record')
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
default_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'PddToken': self.options.get('auth_token')
}
if not url.startswith(self.api_endpoint):
url = self.api_endpoint + url
r = requests.request(action, url, params=query_params,
data=json.dumps(data),
headers=default_headers)
r.raise_for_status() # if the request fails for any reason, throw an error.
if action == 'DELETE':
return ''
else:
return r.json()
def _check_exitcode(self, payload, title):
if payload['success'] == 'ok':
print('{0}: {1}'.format(title, payload['success']))
return True
else:
print('{0}: {1}'.format(title, payload['error']))
return False
<|code_end|>
|
change transip provider to use transip-api
I noticed the transip provider was gone, and that there was a [new project](https://github.com/benkonrath/transip-api) providing a transip api.
I'm not sure how to specify the dependency from github, might be useful to put transip-api on pypi. Otherwise, command line usage seems to work and the tests pass. I haven't tried using it with dehydrated yet.
| lexicon/providers/transip.py
<|code_start|><|code_end|>
setup.py
<|code_start|>"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests', 'tldextract', 'future'],
extras_require={
'route53': ['boto3']
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.__main__:main',
],
},
test_suite='tests'
)
<|code_end|>
| lexicon/providers/transip.py
<|code_start|>from __future__ import print_function
from __future__ import absolute_import
from .base import Provider as BaseProvider
try:
from transip.service.dns import DnsEntry
from transip.service.domain import DomainService
except ImportError:
pass
def ProviderParser(subparser):
subparser.add_argument("--auth-username", help="specify username used to authenticate")
subparser.add_argument("--auth-api-key", help="specify API private key to authenticate")
class Provider(BaseProvider):
def __init__(self, options, provider_options={}):
super(Provider, self).__init__(options)
self.options.update(provider_options)
self.provider_name = 'transip'
self.domain_id = None
username = self.options.get('auth_username')
key_file = self.options.get('auth_api_key')
if not username or not key_file:
raise Exception("No username and/or keyfile was specified")
self.client = DomainService(
login=username,
private_key_file=key_file
)
# Authenticate against provider,
# Make any requests required to get the domain's id for this provider, so it can be used in subsequent calls.
# Should throw an error if authentication fails for any reason, of if the domain does not exist.
def authenticate(self):
## This request will fail when the domain does not exist,
## allowing us to check for existence
domain = self.options.get('domain')
try:
self.client.get_info(domain)
except:
raise
raise Exception("Could not retrieve information about {0}, "
"is this domain yours?".format(domain))
self.domain_id = domain
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
records = self.client.get_info(self.options.get('domain')).dnsEntries
if self._filter_records(records, type, name, content):
# Nothing to do, record already exists
print('create_record: already exists')
return True
records.append(DnsEntry(**{
"name": self._relative_name(name),
"record_type": type,
"content": self._bind_format_target(type, content),
"expire": self.options.get('ttl') or 86400
}))
self.client.set_dns_entries(self.options.get('domain'), records)
status = len(self.list_records(type, name, content, show_output=False)) >= 1
print("create_record: {0}".format(status))
return status
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None, show_output=True):
all_records = self._convert_records(self.client.get_info(self.options.get('domain')).dnsEntries)
records = self._filter_records(
records=all_records,
type=type,
name=name,
content=content
)
if show_output:
print('list_records: {0}'.format(records))
return records
# Update a record. Identifier must be specified.
def update_record(self, identifier=None, type=None, name=None, content=None):
if not (type or name or content):
raise Exception("At least one of type, name or content must be specified.")
all_records = self.list_records(show_output=False)
filtered_records = self._filter_records(all_records, type, name)
for record in filtered_records:
all_records.remove(record)
all_records.append({
"name": name,
"type": type,
"content": self._bind_format_target(type, content),
"ttl": self.options.get('ttl') or 86400
})
self.client.set_dns_entries(self.options.get('domain'), self._convert_records_back(all_records))
status = len(self.list_records(type, name, content, show_output=False)) >= 1
print("update_record: {0}".format(status))
return status
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def delete_record(self, identifier=None, type=None, name=None, content=None):
if not (type or name or content):
raise Exception("At least one of type, name or content must be specified.")
all_records = self.list_records(show_output=False)
filtered_records = self._filter_records(all_records, type, name, content)
for record in filtered_records:
all_records.remove(record)
self.client.set_dns_entries(self.options.get('domain'), self._convert_records_back(all_records))
status = len(self.list_records(type, name, content, show_output=False)) == 0
print("delete_record: {0}".format(status))
return status
def _full_name(self, record_name):
if record_name == "@":
record_name = self.options['domain']
return super(Provider, self)._full_name(record_name)
def _relative_name(self, record_name):
name = super(Provider, self)._relative_name(record_name)
if not name:
name = "@"
return name
def _bind_format_target(self, type, target):
if type == "CNAME" and not target.endswith("."):
target += "."
return target
# Convert the objects from transip to dicts, for easier processing
def _convert_records(self, records):
_records = []
for record in records:
_records.append({
"id": "{0}-{1}".format(self._full_name(record.name), record.type),
"name": self._full_name(record.name),
"type": record.type,
"content": record.content,
"ttl": record.expire
})
return _records
def _to_dns_entry(self, _entry):
return DnsEntry(self._relative_name(_entry['name']), _entry['ttl'], _entry['type'], _entry['content'])
def _convert_records_back(self, _records):
return [self._to_dns_entry(record) for record in _records]
# Filter a list of records based on criteria
def _filter_records(self, records, type=None, name=None, content=None):
_records = []
for record in records:
if (not type or record['type'] == type) and \
(not name or record['name'] == self._full_name(name)) and \
(not content or record['content'] == content):
_records.append(record)
return _records
<|code_end|>
setup.py
<|code_start|>"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests', 'tldextract', 'future'],
# Each dependency group in extras_require should match a provider name
# When adding a new depenency group here, please ensure that it has been
# added to optional-requirements.txt as well.
extras_require={
'route53': ['boto3'],
'transip': ['transip==0.1.0-dev']
},
dependency_links = ['git+https://github.com/benkonrath/transip-api.git#egg=transip-0.1.0-dev'],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.__main__:main',
],
},
test_suite='tests'
)
<|code_end|>
|
function update_record() in namesilo.py has wrong query parameter
origin: data['rdata'] = content
modified: data['rrvalue'] = content
| lexicon/providers/namesilo.py
<|code_start|>from __future__ import print_function
from __future__ import absolute_import
from .base import Provider as BaseProvider
import requests
from xml.etree import ElementTree
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify key used authenticate")
class Provider(BaseProvider):
def __init__(self, options, provider_options={}):
super(Provider, self).__init__(options)
self.domain_id = None
self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.namesilo.com/api'
def authenticate(self):
payload = self._get('/getDomainInfo', {'domain': self.options['domain']})
self.domain_id = self.options['domain']
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
record = {
'domain': self.domain_id,
'rrhost': self._relative_name(name),
'rrtype': type,
'rrvalue': content
}
if self.options.get('ttl'):
record['rrttl'] = self.options.get('ttl')
payload = self._get('/dnsAddRecord', record)
print('create_record: {0}'.format(True))
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
query = {'domain': self.domain_id}
payload = self._get('/dnsListRecords', query)
records = []
for record in payload.find('reply').findall('resource_record'):
processed_record = {
'type': record.find('type').text,
'name': record.find('host').text,
'ttl': record.find('ttl').text,
'content': record.find('value').text,
'id': record.find('record_id').text
}
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'] == content]
print('list_records: {0}'.format(records))
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
data = {
'domain': self.domain_id,
'rrid': identifier
}
# if type:
# data['rtype'] = type
if name:
data['rrhost'] = self._relative_name(name)
if content:
data['rdata'] = content
if self.options.get('ttl'):
data['rrttl'] = self.options.get('ttl')
payload = self._get('/dnsUpdateRecord', data)
print('update_record: {0}'.format(True))
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
data = {
'domain': self.domain_id
}
if not identifier:
records = self.list_records(type, name, content)
print(records)
if len(records) == 1:
data['rrid'] = records[0]['id']
else:
raise Exception('Record identifier could not be found.')
else:
data['rrid'] = identifier
payload = self._get('/dnsDeleteRecord', data)
print('delete_record: {0}'.format(True))
return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
query_params['version'] = 1
query_params['type'] = 'xml'
query_params['key'] = self.options['auth_token']
r = requests.request(action, self.api_endpoint + url, params=query_params)
#data=json.dumps(data))
r.raise_for_status() # if the request fails for any reason, throw an error.
# TODO: check if the response is an error using
tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))
root = tree.getroot()
if root.find('reply').find('code').text != '300':
raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))
return root<|code_end|>
| lexicon/providers/namesilo.py
<|code_start|>from __future__ import print_function
from __future__ import absolute_import
from .base import Provider as BaseProvider
import requests
from xml.etree import ElementTree
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify key used authenticate")
class Provider(BaseProvider):
def __init__(self, options, provider_options={}):
super(Provider, self).__init__(options)
self.domain_id = None
self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.namesilo.com/api'
def authenticate(self):
payload = self._get('/getDomainInfo', {'domain': self.options['domain']})
self.domain_id = self.options['domain']
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
record = {
'domain': self.domain_id,
'rrhost': self._relative_name(name),
'rrtype': type,
'rrvalue': content
}
if self.options.get('ttl'):
record['rrttl'] = self.options.get('ttl')
payload = self._get('/dnsAddRecord', record)
print('create_record: {0}'.format(True))
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
query = {'domain': self.domain_id}
payload = self._get('/dnsListRecords', query)
records = []
for record in payload.find('reply').findall('resource_record'):
processed_record = {
'type': record.find('type').text,
'name': record.find('host').text,
'ttl': record.find('ttl').text,
'content': record.find('value').text,
'id': record.find('record_id').text
}
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'] == content]
print('list_records: {0}'.format(records))
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
data = {
'domain': self.domain_id,
'rrid': identifier
}
# if type:
# data['rtype'] = type
if name:
data['rrhost'] = self._relative_name(name)
if content:
data['rrvalue'] = content
if self.options.get('ttl'):
data['rrttl'] = self.options.get('ttl')
payload = self._get('/dnsUpdateRecord', data)
print('update_record: {0}'.format(True))
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
data = {
'domain': self.domain_id
}
if not identifier:
records = self.list_records(type, name, content)
print(records)
if len(records) == 1:
data['rrid'] = records[0]['id']
else:
raise Exception('Record identifier could not be found.')
else:
data['rrid'] = identifier
payload = self._get('/dnsDeleteRecord', data)
print('delete_record: {0}'.format(True))
return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
query_params['version'] = 1
query_params['type'] = 'xml'
query_params['key'] = self.options['auth_token']
r = requests.request(action, self.api_endpoint + url, params=query_params)
#data=json.dumps(data))
r.raise_for_status() # if the request fails for any reason, throw an error.
# TODO: check if the response is an error using
tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))
root = tree.getroot()
if root.find('reply').find('code').text != '300':
raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))
return root
<|code_end|>
|
unable to install transip dependencies or use transip plugin
Output: pip install dns-lexicon[transip]
```
Requirement already satisfied: dns-lexicon[transip] in ./lib/python2.7/site-packages
Requirement already satisfied: requests in ./lib/python2.7/site-packages (from dns-lexicon[transip])
Requirement already satisfied: future in ./lib/python2.7/site-packages (from dns-lexicon[transip])
Requirement already satisfied: tldextract in ./lib/python2.7/site-packages (from dns-lexicon[transip])
Collecting transip==0.1.0-dev; extra == "transip" (from dns-lexicon[transip])
Could not find a version that satisfies the requirement transip==0.1.0-dev; extra == "transip" (from dns-lexicon[transip]) (from versions: 0.2)
No matching distribution found for transip==0.1.0-dev; extra == "transip" (from dns-lexicon[transip])
```
after manual installing the transip package i get the following error
```
Namespace(action='list', auth_api_key='../test-acme/private', auth_username='foobar', content='foo', delegated=None, domain='example.org', identifier=None, name='foo', priority=None, provider_name='transip', ttl=None, type='NS')
Traceback (most recent call last):
File "./bin/lexicon", line 11, in <module>
sys.exit(main())
File "/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/__main__.py", line 56, in main
client.execute()
File "/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/client.py", line 36, in execute
self.provider.authenticate()
File "/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/providers/transip.py", line 43, in authenticate
self.client.get_info(domain)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/transip/service/domain.py", line 26, in get_info
cookie = self.build_cookie(mode=MODE_RO, method='getInfo', parameters=[domain_name])
File "/home/muller/lexicon/local/lib/python2.7/site-packages/transip/client.py", line 111, in build_cookie
timestamp=timestamp, nonce=nonce, additional=parameters))
File "/home/muller/lexicon/local/lib/python2.7/site-packages/transip/client.py", line 51, in _sign
privkey = rsa.PrivateKey.load_pkcs1(keydata)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 75, in load_pkcs1
return method(keyfile)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 511, in _load_pkcs1_pem
return cls._load_pkcs1_der(der)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 459, in _load_pkcs1_der
as_ints = tuple(int(x) for x in priv[1:9])
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 459, in <genexpr>
as_ints = tuple(int(x) for x in priv[1:9])
TypeError: int() argument must be a string or a number, not 'Sequence'
```
| setup.py
<|code_start|>"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests', 'tldextract', 'future'],
# Each dependency group in extras_require should match a provider name
# When adding a new depenency group here, please ensure that it has been
# added to optional-requirements.txt as well.
extras_require={
'route53': ['boto3'],
'transip': ['transip==0.1.0-dev']
},
dependency_links = ['git+https://github.com/benkonrath/transip-api.git#egg=transip-0.1.0-dev'],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.__main__:main',
],
},
test_suite='tests'
)
<|code_end|>
| setup.py
<|code_start|>"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests', 'tldextract', 'future'],
# Each dependency group in extras_require should match a provider name
# When adding a new depenency group here, please ensure that it has been
# added to optional-requirements.txt as well.
extras_require={
'route53': ['boto3'],
'transip': ['transip>=0.3.0']
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.__main__:main',
],
},
test_suite='tests'
)
<|code_end|>
|
Fix logging TypeError (digitalocean.py)
[This same issue](https://github.com/AnalogJ/lexicon/pull/128/commits/903af58378ab9942d817c57e0330b5f7ac26b4e9) exists in `lexicon/providers/digitalocean.py` line 111. The same edit is needed to fix it.
The error generated is:
```
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 861, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 734, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 465, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 329, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Logged from file digitalocean.py, line 111
```
That section is:
```
Line 110: # is always True at this point, if a non 200 response is returned an error is raised.
Line 111: logger.debug('delete_record: {0}', True)
Line 112: return True
```
| lexicon/providers/digitalocean.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register the DigitalOcean provider's command-line options on *subparser*."""
    token_help = "specify token used authenticate to DNS provider"
    subparser.add_argument("--auth-token", help=token_help)
class Provider(BaseProvider):
    """Lexicon Provider implementation for the DigitalOcean v2 DNS API."""

    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.digitalocean.com/v2')

    def authenticate(self):
        """Verify the domain exists; `_get` raises on a non-2xx response."""
        payload = self._get('/domains/{0}'.format(self.options['domain']))
        self.domain_id = self.options['domain']

    def create_record(self, type, name, content):
        """Create a DNS record; always returns True (HTTP errors raise)."""
        record = {
            'type': type,
            'name': self._relative_name(name),
            'data': content,
        }
        if type == 'CNAME':
            # make sure the data is always a FQDN for CNAME.
            record['data'] = record['data'].rstrip('.') + '.'

        payload = self._post('/domains/{0}/records'.format(self.domain_id), record)

        logger.debug('create_record: %s', True)
        return True

    def list_records(self, type=None, name=None, content=None):
        """List all records, following the API's pagination links.

        Return an empty list if no records found. type, name and content
        filter the results after the response is received.
        """
        url = '/domains/{0}/records'.format(self.domain_id)
        records = []
        payload = {}

        next_url = url  # renamed from `next` to stop shadowing the builtin
        while next_url is not None:
            payload = self._get(next_url)

            # Follow pagination links when the API provides them.
            if 'links' in payload \
                    and 'pages' in payload['links'] \
                    and 'next' in payload['links']['pages']:
                next_url = payload['links']['pages']['next']
            else:
                next_url = None

            for record in payload['domain_records']:
                processed_record = {
                    'type': record['type'],
                    'name': "{0}.{1}".format(record['name'], self.domain_id),
                    'ttl': '',
                    'content': record['data'],
                    'id': record['id']
                }
                records.append(processed_record)

        if type:
            records = [record for record in records if record['type'] == type]
        if name:
            records = [record for record in records if record['name'] == self._full_name(name)]
        if content:
            records = [record for record in records if record['content'].lower() == content.lower()]

        logger.debug('list_records: %s', records)
        return records

    def update_record(self, identifier, type=None, name=None, content=None):
        """Update an existing record; always returns True (HTTP errors raise)."""
        data = {}
        if type:
            data['type'] = type
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['data'] = content

        payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)

        logger.debug('update_record: %s', True)
        return True

    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete an existing record, resolving the identifier from the
        filters when it is not given. Raises if the filters do not match
        exactly one record.
        """
        if not identifier:
            records = self.list_records(type, name, content)
            logger.debug('records: %s', records)
            if len(records) == 1:
                identifier = records[0]['id']
            else:
                raise Exception('Record identifier could not be found.')

        payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))

        # is always True at this point, if a non 200 response is returned an error is raised.
        # Fixed: '{0}' is not %-style format -- passing extra args with it
        # made the logging module raise TypeError while emitting the record.
        logger.debug('delete_record: %s', True)
        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an authenticated HTTP request against the API endpoint.

        Returns the decoded JSON body, or '' for DELETE (which has no body).
        """
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
        }
        if not url.startswith(self.api_endpoint):
            url = self.api_endpoint + url
        r = requests.request(action, url, params=query_params,
                             data=json.dumps(data),
                             headers=default_headers)
        # if the request fails for any reason, throw an error.
        r.raise_for_status()
        if action == 'DELETE':
            return ''
        else:
            return r.json()
<|code_end|>
lexicon/providers/dnsmadeeasy.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import contextlib
import datetime
import hmac
import json
import locale
import logging
from hashlib import sha1
import requests
from builtins import bytes
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register the DNSMadeEasy provider's command-line options on *subparser*."""
    for flag, description in (
            ("--auth-username", "specify username used to authenticate"),
            ("--auth-token", "specify token used authenticate=")):
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Lexicon Provider implementation for the DNSMadeEasy V2.0 API."""

    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.dnsmadeeasy.com/V2.0')

    def authenticate(self):
        """Resolve and store the managed-domain id for options['domain'].

        A 404 response is treated as "no such domain"; any other HTTP
        error propagates unchanged.
        """
        try:
            payload = self._get('/dns/managed/name', {'domainname': self.options['domain']})
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                payload = {}
            else:
                raise e

        # NOTE(review): a truthy payload missing the 'id' key would raise
        # KeyError here instead of the friendlier message below -- confirm
        # the API always includes 'id' on success.
        if not payload or not payload['id']:
            raise Exception('No domain found')

        self.domain_id = payload['id']

    # Create record. If record already exists with the same content, do nothing'
    def create_record(self, type, name, content):
        """Create a record; return True when the API echoes the record back."""
        record = {
            'type': type,
            'name': self._relative_name(name),
            'value': content,
            'ttl': self.options['ttl']
        }
        payload = {}
        try:
            payload = self._post('/dns/managed/{0}/records/'.format(self.domain_id), record)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 400:
                payload = {}
            # http 400 is ok here, because the record probably already exists
            # NOTE(review): non-400 HTTP errors are also swallowed by this
            # handler (payload stays empty) -- confirm that is intentional.

        logger.debug('create_record: %s', 'name' in payload)
        return 'name' in payload

    def list_records(self, type=None, name=None, content=None):
        """List all records, filtered server-side by type and name.

        Return an empty list if no records found.
        NOTE(review): the `content` argument is accepted but never applied
        as a filter, although delete_record passes it expecting one --
        confirm before relying on content-based deletion.
        """
        query = {}  # renamed from `filter` to stop shadowing the builtin
        if type:
            query['type'] = type
        if name:
            query['recordName'] = self._relative_name(name)
        payload = self._get('/dns/managed/{0}/records'.format(self.domain_id), query)

        records = []
        for record in payload['data']:
            processed_record = {
                'type': record['type'],
                'name': '{0}.{1}'.format(record['name'], self.options['domain']),
                'ttl': record['ttl'],
                'content': record['value'],
                'id': record['id']
            }
            processed_record = self._clean_TXT_record(processed_record)
            records.append(processed_record)

        logger.debug('list_records: %s', records)
        return records

    # Create or update a record.
    def update_record(self, identifier, type=None, name=None, content=None):
        """Update the record with the given identifier; always returns True."""
        data = {
            'id': identifier,
            'ttl': self.options['ttl']
        }
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['value'] = content
        if type:
            data['type'] = type

        payload = self._put('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)

        # Fixed: '{0}' is not %-style format -- passing extra args with it
        # made the logging module raise TypeError while emitting.
        logger.debug('update_record: %s', True)
        return True

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete a record, resolving the identifier from filters when absent.

        Raises if the filters do not match exactly one record.
        """
        if not identifier:
            records = self.list_records(type, name, content)
            logger.debug('records: %s', records)
            if len(records) == 1:
                identifier = records[0]['id']
            else:
                raise Exception('Record identifier could not be found.')

        payload = self._delete('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier))

        # is always True at this point, if a non 200 response is returned an error is raised.
        logger.debug('delete_record: %s', True)
        return True

    # Helpers

    # this method allows you to set the locale when doing datetime string formatting.
    # https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale
    @contextlib.contextmanager
    def setlocale(self, *args, **kw):
        """Temporarily force LC_TIME to en_US.UTF-8, restoring it afterwards.

        NOTE(review): *args/**kw are accepted but ignored (the locale is
        hard-coded) -- confirm callers do not rely on passing their own.
        """
        saved = locale.setlocale(locale.LC_ALL)
        try:
            # yield locale.setlocale(*args, **kw)
            yield locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
        finally:
            # Fixed: restore the saved locale even when the with-body raises,
            # so a failed request cannot leave the process locale changed.
            locale.setlocale(locale.LC_ALL, saved)

    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an HMAC-signed HTTP request against the DNSMadeEasy API."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'x-dnsme-apiKey': self.options['auth_username']
        }
        default_auth = None

        # all requests require a HMAC header and timestamp header.
        now = datetime.datetime.utcnow()
        # required format: Sat, 12 Feb 2011 20:59:04 GMT
        with self.setlocale(locale.LC_TIME, 'en_US.utf8'):
            request_date = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

        hashed = hmac.new(bytes(self.options['auth_token'], 'ascii'),
                          bytes(request_date, 'ascii'), sha1)

        default_headers['x-dnsme-requestDate'] = request_date
        default_headers['x-dnsme-hmac'] = hashed.hexdigest()

        r = requests.request(action, self.api_endpoint + url, params=query_params,
                             data=json.dumps(data),
                             headers=default_headers,
                             auth=default_auth)
        # if the request fails for any reason, throw an error.
        r.raise_for_status()

        # PUT and DELETE actions dont return valid json.
        if action == 'DELETE' or action == 'PUT':
            return r.text
        return r.json()
<|code_end|>
| lexicon/providers/digitalocean.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register the DigitalOcean provider's command-line options on *subparser*."""
    token_help = "specify token used authenticate to DNS provider"
    subparser.add_argument("--auth-token", help=token_help)
class Provider(BaseProvider):
    """Lexicon Provider implementation for the DigitalOcean v2 DNS API."""

    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.digitalocean.com/v2')

    def authenticate(self):
        """Verify the domain exists; `_get` raises on a non-2xx response."""
        payload = self._get('/domains/{0}'.format(self.options['domain']))
        self.domain_id = self.options['domain']

    def create_record(self, type, name, content):
        """Create a DNS record; always returns True (HTTP errors raise)."""
        record = {
            'type': type,
            'name': self._relative_name(name),
            'data': content,
        }
        if type == 'CNAME':
            # make sure the data is always a FQDN for CNAME.
            record['data'] = record['data'].rstrip('.') + '.'

        payload = self._post('/domains/{0}/records'.format(self.domain_id), record)

        logger.debug('create_record: %s', True)
        return True

    def list_records(self, type=None, name=None, content=None):
        """List all records, following the API's pagination links.

        Return an empty list if no records found. type, name and content
        filter the results after the response is received.
        """
        url = '/domains/{0}/records'.format(self.domain_id)
        records = []
        payload = {}

        next_url = url  # renamed from `next` to stop shadowing the builtin
        while next_url is not None:
            payload = self._get(next_url)

            # Follow pagination links when the API provides them.
            if 'links' in payload \
                    and 'pages' in payload['links'] \
                    and 'next' in payload['links']['pages']:
                next_url = payload['links']['pages']['next']
            else:
                next_url = None

            for record in payload['domain_records']:
                processed_record = {
                    'type': record['type'],
                    'name': "{0}.{1}".format(record['name'], self.domain_id),
                    'ttl': '',
                    'content': record['data'],
                    'id': record['id']
                }
                records.append(processed_record)

        if type:
            records = [record for record in records if record['type'] == type]
        if name:
            records = [record for record in records if record['name'] == self._full_name(name)]
        if content:
            records = [record for record in records if record['content'].lower() == content.lower()]

        logger.debug('list_records: %s', records)
        return records

    def update_record(self, identifier, type=None, name=None, content=None):
        """Update an existing record; always returns True (HTTP errors raise)."""
        data = {}
        if type:
            data['type'] = type
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['data'] = content

        payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)

        logger.debug('update_record: %s', True)
        return True

    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete an existing record, resolving the identifier from the
        filters when it is not given. Raises if the filters do not match
        exactly one record.
        """
        if not identifier:
            records = self.list_records(type, name, content)
            logger.debug('records: %s', records)
            if len(records) == 1:
                identifier = records[0]['id']
            else:
                raise Exception('Record identifier could not be found.')

        payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))

        # is always True at this point, if a non 200 response is returned an error is raised.
        logger.debug('delete_record: %s', True)
        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an authenticated HTTP request against the API endpoint.

        Returns the decoded JSON body, or '' for DELETE (which has no body).
        """
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
        }
        if not url.startswith(self.api_endpoint):
            url = self.api_endpoint + url
        r = requests.request(action, url, params=query_params,
                             data=json.dumps(data),
                             headers=default_headers)
        # if the request fails for any reason, throw an error.
        r.raise_for_status()
        if action == 'DELETE':
            return ''
        else:
            return r.json()
<|code_end|>
lexicon/providers/dnsmadeeasy.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import contextlib
import datetime
import hmac
import json
import locale
import logging
from hashlib import sha1
import requests
from builtins import bytes
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register the DNSMadeEasy provider's command-line options on *subparser*."""
    for flag, description in (
            ("--auth-username", "specify username used to authenticate"),
            ("--auth-token", "specify token used authenticate=")):
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Lexicon Provider implementation for the DNSMadeEasy V2.0 API."""

    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.dnsmadeeasy.com/V2.0')

    def authenticate(self):
        """Resolve and store the managed-domain id for options['domain'].

        A 404 response is treated as "no such domain"; any other HTTP
        error propagates unchanged.
        """
        try:
            payload = self._get('/dns/managed/name', {'domainname': self.options['domain']})
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                payload = {}
            else:
                raise e

        # NOTE(review): a truthy payload missing the 'id' key would raise
        # KeyError here instead of the friendlier message below -- confirm
        # the API always includes 'id' on success.
        if not payload or not payload['id']:
            raise Exception('No domain found')

        self.domain_id = payload['id']

    # Create record. If record already exists with the same content, do nothing'
    def create_record(self, type, name, content):
        """Create a record; return True when the API echoes the record back."""
        record = {
            'type': type,
            'name': self._relative_name(name),
            'value': content,
            'ttl': self.options['ttl']
        }
        payload = {}
        try:
            payload = self._post('/dns/managed/{0}/records/'.format(self.domain_id), record)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 400:
                payload = {}
            # http 400 is ok here, because the record probably already exists
            # NOTE(review): non-400 HTTP errors are also swallowed by this
            # handler (payload stays empty) -- confirm that is intentional.

        logger.debug('create_record: %s', 'name' in payload)
        return 'name' in payload

    def list_records(self, type=None, name=None, content=None):
        """List all records, filtered server-side by type and name.

        Return an empty list if no records found.
        NOTE(review): the `content` argument is accepted but never applied
        as a filter, although delete_record passes it expecting one --
        confirm before relying on content-based deletion.
        """
        query = {}  # renamed from `filter` to stop shadowing the builtin
        if type:
            query['type'] = type
        if name:
            query['recordName'] = self._relative_name(name)
        payload = self._get('/dns/managed/{0}/records'.format(self.domain_id), query)

        records = []
        for record in payload['data']:
            processed_record = {
                'type': record['type'],
                'name': '{0}.{1}'.format(record['name'], self.options['domain']),
                'ttl': record['ttl'],
                'content': record['value'],
                'id': record['id']
            }
            processed_record = self._clean_TXT_record(processed_record)
            records.append(processed_record)

        logger.debug('list_records: %s', records)
        return records

    # Create or update a record.
    def update_record(self, identifier, type=None, name=None, content=None):
        """Update the record with the given identifier; always returns True."""
        data = {
            'id': identifier,
            'ttl': self.options['ttl']
        }
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['value'] = content
        if type:
            data['type'] = type

        payload = self._put('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)

        logger.debug('update_record: %s', True)
        return True

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete a record, resolving the identifier from filters when absent.

        Raises if the filters do not match exactly one record.
        """
        if not identifier:
            records = self.list_records(type, name, content)
            logger.debug('records: %s', records)
            if len(records) == 1:
                identifier = records[0]['id']
            else:
                raise Exception('Record identifier could not be found.')

        payload = self._delete('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier))

        # is always True at this point, if a non 200 response is returned an error is raised.
        logger.debug('delete_record: %s', True)
        return True

    # Helpers

    # this method allows you to set the locale when doing datetime string formatting.
    # https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale
    @contextlib.contextmanager
    def setlocale(self, *args, **kw):
        """Temporarily force LC_TIME to en_US.UTF-8, restoring it afterwards.

        NOTE(review): *args/**kw are accepted but ignored (the locale is
        hard-coded) -- confirm callers do not rely on passing their own.
        """
        saved = locale.setlocale(locale.LC_ALL)
        try:
            # yield locale.setlocale(*args, **kw)
            yield locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
        finally:
            # Fixed: restore the saved locale even when the with-body raises,
            # so a failed request cannot leave the process locale changed.
            locale.setlocale(locale.LC_ALL, saved)

    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an HMAC-signed HTTP request against the DNSMadeEasy API."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'x-dnsme-apiKey': self.options['auth_username']
        }
        default_auth = None

        # all requests require a HMAC header and timestamp header.
        now = datetime.datetime.utcnow()
        # required format: Sat, 12 Feb 2011 20:59:04 GMT
        with self.setlocale(locale.LC_TIME, 'en_US.utf8'):
            request_date = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

        hashed = hmac.new(bytes(self.options['auth_token'], 'ascii'),
                          bytes(request_date, 'ascii'), sha1)

        default_headers['x-dnsme-requestDate'] = request_date
        default_headers['x-dnsme-hmac'] = hashed.hexdigest()

        r = requests.request(action, self.api_endpoint + url, params=query_params,
                             data=json.dumps(data),
                             headers=default_headers,
                             auth=default_auth)
        # if the request fails for any reason, throw an error.
        r.raise_for_status()

        # PUT and DELETE actions dont return valid json.
        if action == 'DELETE' or action == 'PUT':
            return r.text
        return r.json()
<|code_end|>
|
Route53: Public and Private Zones can't be distinguished.
I've been testing out lexicon for updating DNS records via Route53, and I have a Public and Private Zone with the same domain name.
I noticed that lexicon is only searching for the domain name by name, so in my case, my internal zone was the first created and so it's the only thing lexicon itself finds for my domain name.
I was going to have it update a record for my home IP address for dynamic IP issues, but what's happening is it is only updating the Private zone's record. I've specified --identifier with the ZoneID of the Public Zone, but that is not working either.
I didn't even have a record for home.mydomain.com in my Private Zone, and it ended up creating the record just to fulfill the update. I do see in the output of lexicon both private and public zones, including the <PrivateZone>true|false</PrivateZone> specifically identifying it as private or not.
I'd like to be able to update both, differently as needed.
| lexicon/providers/route53.py
<|code_start|>"""Provide support to Lexicon for AWS Route 53 DNS changes."""
from __future__ import absolute_import
from __future__ import print_function
import logging
from .base import Provider as BaseProvider
try:
import boto3 #optional dep
import botocore #optional dep
except ImportError:
pass
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Specify arguments for AWS Route 53 Lexicon Provider."""
    arguments = (
        ("--auth-access-key", "specify ACCESS_KEY used to authenticate"),
        ("--auth-access-secret", "specify ACCESS_SECRET used authenticate"),
        # TODO: these are only required for testing, we should figure out a way
        # to remove them & update the integration tests to dynamically populate
        # the auth credentials that are required.
        ("--auth-username", "alternative way to specify ACCESS_KEY used to authenticate"),
        ("--auth-token", "alternative way to specify ACCESS_SECRET used authenticate"),
    )
    for flag, description in arguments:
        subparser.add_argument(flag, help=description)
class RecordSetPaginator(object):
    """Paginate through the complete list of record sets in a hosted zone."""

    def __init__(self, r53_client, hosted_zone_id, max_items=None):
        """Remember the client and zone used for subsequent API calls."""
        self.r53_client = r53_client
        self.hosted_zone_id = hosted_zone_id
        self.max_items = max_items

    def get_record_sets(self, **kwargs):
        """Fetch a single page of record sets from the Route 53 API."""
        return self.r53_client.list_resource_record_sets(**kwargs)

    def get_base_kwargs(self):
        """Build the keyword arguments shared by every page request."""
        kwargs = {'HostedZoneId': self.hosted_zone_id}
        if self.max_items is not None:
            kwargs['MaxItems'] = str(self.max_items)
        return kwargs

    def all_record_sets(self):
        """Yield every record set, following pagination markers until done."""
        kwargs = self.get_base_kwargs()
        more_pages = True
        next_name = next_type = None
        while more_pages:
            # After the first page, resume from the marker the API returned.
            if next_name is not None:
                kwargs.update({
                    'StartRecordName': next_name,
                    'StartRecordType': next_type,
                })
            result = self.get_record_sets(**kwargs)
            for record_set in result.get('ResourceRecordSets', []):
                yield record_set
            more_pages = result.get('IsTruncated', False)
            next_name = result.get('NextRecordName', None)
            next_type = result.get('NextRecordType', None)
class Provider(BaseProvider):
    """Provide AWS Route 53 implementation of Lexicon Provider interface."""

    def __init__(self, options, engine_overrides=None):
        """Initialize AWS Route 53 DNS provider."""
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        # instantiate the client
        self.r53_client = boto3.client(
            'route53',
            aws_access_key_id=self.options.get('auth_access_key', self.options.get('auth_username')),
            aws_secret_access_key=self.options.get('auth_access_secret', self.options.get('auth_token'))
        )

    def authenticate(self):
        """Determine the hosted zone id for the domain.

        Raises Exception when no hosted zone name matches the domain.
        """
        try:
            hosted_zones = self.r53_client.list_hosted_zones_by_name()[
                'HostedZones'
            ]
            hosted_zone = next(
                hz for hz in hosted_zones
                if hz['Name'] == '{0}.'.format(self.options['domain'])
            )
            self.domain_id = hosted_zone['Id']
        except StopIteration:
            raise Exception('No domain found')

    def _change_record_sets(self, action, type, name, content):
        """Submit one change batch; return True on success, None on ClientError."""
        ttl = self.options['ttl']
        # TXT and SPF record values must be wrapped in double quotes.
        value = '"{0}"'.format(content) if type in ['TXT', 'SPF'] else content
        try:
            self.r53_client.change_resource_record_sets(
                HostedZoneId=self.domain_id,
                ChangeBatch={
                    'Comment': '{0} using lexicon Route 53 provider'.format(
                        action
                    ),
                    'Changes': [
                        {
                            'Action': action,
                            'ResourceRecordSet': {
                                'Name': self._fqdn_name(name),
                                'Type': type,
                                'TTL': ttl if ttl is not None else 300,
                                'ResourceRecords': [
                                    {
                                        'Value': value
                                    }
                                ]
                            }
                        }
                    ]
                }
            )
            return True
        except botocore.exceptions.ClientError as e:
            # Fixed: exceptions have no `.message` attribute on Python 3,
            # so the old `e.message` raised AttributeError while handling
            # the original error; str(e) works on both Python 2 and 3.
            logger.debug(str(e), exc_info=True)

    def create_record(self, type, name, content):
        """Create a record in the hosted zone."""
        return self._change_record_sets('CREATE', type, name, content)

    def update_record(self, identifier=None, type=None, name=None, content=None):
        """Update a record from the hosted zone."""
        return self._change_record_sets('UPSERT', type, name, content)

    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete a record from the hosted zone."""
        return self._change_record_sets('DELETE', type, name, content)

    def _format_content(self, type, content):
        """Strip the surrounding quotes Route 53 stores for TXT/SPF values."""
        return content[1:-1] if type in ['TXT', 'SPF'] else content

    def list_records(self, type=None, name=None, content=None):
        """List all records for the hosted zone, filtered by the arguments."""
        records = []
        paginator = RecordSetPaginator(self.r53_client, self.domain_id)
        for record in paginator.all_record_sets():
            if type is not None and record['Type'] != type:
                continue
            if name is not None and record['Name'] != self._fqdn_name(name):
                continue
            # NOTE(review): if a record has neither AliasTarget nor
            # ResourceRecords, record_content keeps its value from the
            # previous iteration (or is unbound) -- confirm upstream.
            if record.get('AliasTarget', None) is not None:
                record_content = [record['AliasTarget'].get('DNSName', None)]
            if record.get('ResourceRecords', None) is not None:
                record_content = [self._format_content(record['Type'], value['Value']) for value
                                  in record['ResourceRecords']]
            if content is not None and content not in record_content:
                continue
            logger.debug('record: %s', record)
            records.append({
                'type': record['Type'],
                'name': self._full_name(record['Name']),
                'ttl': record.get('TTL', None),
                'content': record_content[0] if len(record_content) == 1 else record_content,
            })
        logger.debug('list_records: %s', records)
        return records
<|code_end|>
| lexicon/providers/route53.py
<|code_start|>"""Provide support to Lexicon for AWS Route 53 DNS changes."""
from __future__ import absolute_import
from __future__ import print_function
import logging
from .base import Provider as BaseProvider
try:
import boto3 #optional dep
import botocore #optional dep
except ImportError:
pass
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Specify arguments for AWS Route 53 Lexicon Provider."""
    arguments = (
        ("--auth-access-key", "specify ACCESS_KEY used to authenticate"),
        ("--auth-access-secret", "specify ACCESS_SECRET used authenticate"),
        ("--private-zone", "indicates what kind of hosted zone to use, if true, use only private zones, if false, use only public zones"),
        # TODO: these are only required for testing, we should figure out a way
        # to remove them & update the integration tests to dynamically populate
        # the auth credentials that are required.
        ("--auth-username", "alternative way to specify ACCESS_KEY used to authenticate"),
        ("--auth-token", "alternative way to specify ACCESS_SECRET used authenticate"),
    )
    for flag, description in arguments:
        subparser.add_argument(flag, help=description)
class RecordSetPaginator(object):
    """Paginate through the complete list of record sets in a hosted zone."""

    def __init__(self, r53_client, hosted_zone_id, max_items=None):
        """Remember the client and zone used for subsequent API calls."""
        self.r53_client = r53_client
        self.hosted_zone_id = hosted_zone_id
        self.max_items = max_items

    def get_record_sets(self, **kwargs):
        """Fetch a single page of record sets from the Route 53 API."""
        return self.r53_client.list_resource_record_sets(**kwargs)

    def get_base_kwargs(self):
        """Build the keyword arguments shared by every page request."""
        kwargs = {'HostedZoneId': self.hosted_zone_id}
        if self.max_items is not None:
            kwargs['MaxItems'] = str(self.max_items)
        return kwargs

    def all_record_sets(self):
        """Yield every record set, following pagination markers until done."""
        kwargs = self.get_base_kwargs()
        more_pages = True
        next_name = next_type = None
        while more_pages:
            # After the first page, resume from the marker the API returned.
            if next_name is not None:
                kwargs.update({
                    'StartRecordName': next_name,
                    'StartRecordType': next_type,
                })
            result = self.get_record_sets(**kwargs)
            for record_set in result.get('ResourceRecordSets', []):
                yield record_set
            more_pages = result.get('IsTruncated', False)
            next_name = result.get('NextRecordName', None)
            next_type = result.get('NextRecordType', None)
class Provider(BaseProvider):
    """Provide AWS Route 53 implementation of Lexicon Provider interface."""

    def __init__(self, options, engine_overrides=None):
        """Initialize AWS Route 53 DNS provider."""
        super(Provider, self).__init__(options, engine_overrides)
        self.domain_id = None
        # 'true'/'false' (string) restricts zone lookup to private/public
        # zones; None means "first zone whose name matches".
        self.private_zone = options.get('private_zone', None)
        # instantiate the client
        self.r53_client = boto3.client(
            'route53',
            aws_access_key_id=self.options.get('auth_access_key', self.options.get('auth_username')),
            aws_secret_access_key=self.options.get('auth_access_secret', self.options.get('auth_token'))
        )

    def filter_zone(self, hz):
        """Return True when hosted zone *hz* matches the configured domain
        and, when --private-zone was given, the requested zone kind."""
        if self.private_zone is not None:
            if hz['Config']['PrivateZone'] != self.str2bool(self.private_zone):
                return False

        if hz['Name'] != '{0}.'.format(self.options['domain']):
            return False

        return True

    @staticmethod
    def str2bool(input_string):
        """Loosely interpret a flag value: 'true'/'yes' mean True.

        Made robust: coerce to str first so a boolean (or other non-str)
        option value no longer raises AttributeError on .lower().
        """
        return str(input_string).lower() in ('true', 'yes')

    def authenticate(self):
        """Determine the hosted zone id for the domain.

        Raises Exception when no hosted zone passes filter_zone.
        """
        try:
            hosted_zones = self.r53_client.list_hosted_zones_by_name()[
                'HostedZones'
            ]
            hosted_zone = next(
                hz for hz in hosted_zones
                if self.filter_zone(hz)
            )
            self.domain_id = hosted_zone['Id']
        except StopIteration:
            raise Exception('No domain found')

    def _change_record_sets(self, action, type, name, content):
        """Submit one change batch; return True on success, None on ClientError."""
        ttl = self.options['ttl']
        # TXT and SPF record values must be wrapped in double quotes.
        value = '"{0}"'.format(content) if type in ['TXT', 'SPF'] else content
        try:
            self.r53_client.change_resource_record_sets(
                HostedZoneId=self.domain_id,
                ChangeBatch={
                    'Comment': '{0} using lexicon Route 53 provider'.format(
                        action
                    ),
                    'Changes': [
                        {
                            'Action': action,
                            'ResourceRecordSet': {
                                'Name': self._fqdn_name(name),
                                'Type': type,
                                'TTL': ttl if ttl is not None else 300,
                                'ResourceRecords': [
                                    {
                                        'Value': value
                                    }
                                ]
                            }
                        }
                    ]
                }
            )
            return True
        except botocore.exceptions.ClientError as e:
            # Fixed: exceptions have no `.message` attribute on Python 3,
            # so the old `e.message` raised AttributeError while handling
            # the original error; str(e) works on both Python 2 and 3.
            logger.debug(str(e), exc_info=True)

    def create_record(self, type, name, content):
        """Create a record in the hosted zone."""
        return self._change_record_sets('CREATE', type, name, content)

    def update_record(self, identifier=None, type=None, name=None, content=None):
        """Update a record from the hosted zone."""
        return self._change_record_sets('UPSERT', type, name, content)

    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Delete a record from the hosted zone."""
        return self._change_record_sets('DELETE', type, name, content)

    def _format_content(self, type, content):
        """Strip the surrounding quotes Route 53 stores for TXT/SPF values."""
        return content[1:-1] if type in ['TXT', 'SPF'] else content

    def list_records(self, type=None, name=None, content=None):
        """List all records for the hosted zone, filtered by the arguments."""
        records = []
        paginator = RecordSetPaginator(self.r53_client, self.domain_id)
        for record in paginator.all_record_sets():
            if type is not None and record['Type'] != type:
                continue
            if name is not None and record['Name'] != self._fqdn_name(name):
                continue
            # NOTE(review): if a record has neither AliasTarget nor
            # ResourceRecords, record_content keeps its value from the
            # previous iteration (or is unbound) -- confirm upstream.
            if record.get('AliasTarget', None) is not None:
                record_content = [record['AliasTarget'].get('DNSName', None)]
            if record.get('ResourceRecords', None) is not None:
                record_content = [self._format_content(record['Type'], value['Value']) for value
                                  in record['ResourceRecords']]
            if content is not None and content not in record_content:
                continue
            logger.debug('record: %s', record)
            records.append({
                'type': record['Type'],
                'name': self._full_name(record['Name']),
                'ttl': record.get('TTL', None),
                'content': record_content[0] if len(record_content) == 1 else record_content,
            })
        logger.debug('list_records: %s', records)
        return records
<|code_end|>
|
Namecheap support not optional
Unlike route53 or softlayer and unlike what setup.py suggests, the namecheap provider is not optional in 2.1.17.
| lexicon/providers/namecheap.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import logging
import namecheap
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register the Namecheap provider's command-line options on *subparser*."""
    subparser.add_argument('--auth-token', help='specify api token used to authenticate')
    subparser.add_argument('--auth-username', help='specify email address used to authenticate')
    # FIXME What is the client IP used for?
    subparser.add_argument('--auth-client-ip',
                           help='Client IP address to send to Namecheap API calls',
                           default='127.0.0.1')
    subparser.add_argument('--auth-sandbox',
                           help='Whether to use the sandbox server',
                           action='store_true')
class Provider(BaseProvider):
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.options = options
self.client = namecheap.Api(
ApiUser=options.get('auth_username',''),
ApiKey=options.get('auth_token',''),
UserName=options.get('auth_username',''),
ClientIP=options.get('auth_client_ip',''),
sandbox=options.get('auth_sandbox', False),
debug=False
)
self.domain = self.options['domain']
self.domain_id = None
def authenticate(self):
try:
domain_names = [x['Name'] for x in self.client.domains_getList()]
except namecheap.ApiError:
raise Exception('Authentication failed')
if self.domain not in domain_names:
raise Exception('The domain {} is not controlled by this Namecheap '
'account'.format(self.domain))
# FIXME What is this for?
self.domain_id = self.domain
# Create record. If record already exists with the same content, do nothing
def create_record(self, type, name, content):
record = {
# required
'Type': type,
'Name': self._relative_name(name),
'Address': content
}
# logger.debug('create_record: %s', 'id' in payload)
# return 'id' in payload
self.client.domains_dns_addHost(self.domain, record)
return True
# List all records. Return an empty list if no records found.
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is
# received.
def list_records(self, type=None, name=None, content=None, id=None):
records = []
raw_records = self.client.domains_dns_getHosts(self.domain)
for record in raw_records:
records.append(self._convert_to_lexicon(record))
if id:
records = [record for record in records if record['id'] == id]
if type:
records = [record for record in records if record['type'] == type]
if name:
if name.endswith('.'):
name = name[:-1]
records = [record for record in records if name in record['name'] ]
if content:
records = [record for record in records if record['content'].lower() == content.lower()]
logger.debug('list_records: %s', records)
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
# Delete record if it exists
self.delete_record(identifier, type, name, content)
return self.create_record(type, name, content)
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
record = self.list_records(type=type, name=name, content=content, id=identifier)
if record:
self.client.domains_dns_delHost(self.domain, self._convert_to_namecheap(record[0]))
return True
else:
return False
def _convert_to_namecheap(self, record):
""" converts from lexicon format record to namecheap format record,
suitable to sending through the api to namecheap"""
name = record['name']
if name.endswith('.'):
name = name[:-1]
short_name = name[:name.find(self.domain)-1]
processed_record = {
'Type': record['type'],
'Name': short_name,
'TTL': record['ttl'],
'Address': record['content'],
'HostId': record['id']
}
return processed_record
def _convert_to_lexicon(self, record):
""" converts from namecheap raw record format to lexicon format record
"""
name = record['Name']
if self.domain not in name:
name = "{}.{}".format(name,self.domain)
processed_record = {
'type': record['Type'],
'name': '{0}.{1}'.format(record['Name'], self.domain),
'ttl': record['TTL'],
'content': record['Address'],
'id': record['HostId']
}
return processed_record
<|code_end|>
| lexicon/providers/namecheap.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import logging
from .base import Provider as BaseProvider
try:
import namecheap #optional dep
except ImportError:
pass
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
subparser.add_argument(
'--auth-token',
help='specify api token used to authenticate'
)
subparser.add_argument(
'--auth-username',
help='specify email address used to authenticate'
)
# FIXME What is the client IP used for?
subparser.add_argument(
'--auth-client-ip',
help='Client IP address to send to Namecheap API calls',
default='127.0.0.1'
)
subparser.add_argument(
'--auth-sandbox',
help='Whether to use the sandbox server',
action='store_true'
)
class Provider(BaseProvider):
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.options = options
self.client = namecheap.Api(
ApiUser=options.get('auth_username',''),
ApiKey=options.get('auth_token',''),
UserName=options.get('auth_username',''),
ClientIP=options.get('auth_client_ip',''),
sandbox=options.get('auth_sandbox', False),
debug=False
)
self.domain = self.options['domain']
self.domain_id = None
def authenticate(self):
try:
domain_names = [x['Name'] for x in self.client.domains_getList()]
except namecheap.ApiError:
raise Exception('Authentication failed')
if self.domain not in domain_names:
raise Exception('The domain {} is not controlled by this Namecheap '
'account'.format(self.domain))
# FIXME What is this for?
self.domain_id = self.domain
# Create record. If record already exists with the same content, do nothing
def create_record(self, type, name, content):
record = {
# required
'Type': type,
'Name': self._relative_name(name),
'Address': content
}
# logger.debug('create_record: %s', 'id' in payload)
# return 'id' in payload
self.client.domains_dns_addHost(self.domain, record)
return True
# List all records. Return an empty list if no records found.
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is
# received.
def list_records(self, type=None, name=None, content=None, id=None):
records = []
raw_records = self.client.domains_dns_getHosts(self.domain)
for record in raw_records:
records.append(self._convert_to_lexicon(record))
if id:
records = [record for record in records if record['id'] == id]
if type:
records = [record for record in records if record['type'] == type]
if name:
if name.endswith('.'):
name = name[:-1]
records = [record for record in records if name in record['name'] ]
if content:
records = [record for record in records if record['content'].lower() == content.lower()]
logger.debug('list_records: %s', records)
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
# Delete record if it exists
self.delete_record(identifier, type, name, content)
return self.create_record(type, name, content)
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
record = self.list_records(type=type, name=name, content=content, id=identifier)
if record:
self.client.domains_dns_delHost(self.domain, self._convert_to_namecheap(record[0]))
return True
else:
return False
def _convert_to_namecheap(self, record):
""" converts from lexicon format record to namecheap format record,
suitable to sending through the api to namecheap"""
name = record['name']
if name.endswith('.'):
name = name[:-1]
short_name = name[:name.find(self.domain)-1]
processed_record = {
'Type': record['type'],
'Name': short_name,
'TTL': record['ttl'],
'Address': record['content'],
'HostId': record['id']
}
return processed_record
def _convert_to_lexicon(self, record):
""" converts from namecheap raw record format to lexicon format record
"""
name = record['Name']
if self.domain not in name:
name = "{}.{}".format(name,self.domain)
processed_record = {
'type': record['Type'],
'name': '{0}.{1}'.format(record['Name'], self.domain),
'ttl': record['TTL'],
'content': record['Address'],
'id': record['HostId']
}
return processed_record
<|code_end|>
|
Support multiple records of the same type on an identifier
In [certbot](https://github.com/certbot/certbot), we use lexicon to create TXT records for a domain to prove to a SSL/TLS certificate authority that the user has control over that domain so the CA will give you a certificate which you can use to set up HTTPS. Due to the way [the protocol](https://tools.ietf.org/html/draft-ietf-acme-acme-09) works, we sometimes have to set multiple TXT records for a single domain.
I'm not sure if each of the providers we use have problems with creating multiple TXT records on a domain (although I know this works with your code for dnsimple), but they all have problems deleting it and immediately error out if there is more than 1 record for the domain:
* [cloudxns](https://github.com/AnalogJ/lexicon/blob/a8c478ad44c06012c3aaaac077f70e67c12a136e/lexicon/providers/cloudxns.py#L125)
* [dnsimple](https://github.com/AnalogJ/lexicon/blob/903af58378ab9942d817c57e0330b5f7ac26b4e9/lexicon/providers/dnsimple.py#L128)
* [dnsmadeeasy](https://github.com/AnalogJ/lexicon/blob/9ad8a9cbf9e4b5c24fe525b274966c2432baf132/lexicon/providers/dnsmadeeasy.py#L121)
* [luadns](https://github.com/AnalogJ/lexicon/blob/a8c478ad44c06012c3aaaac077f70e67c12a136e/lexicon/providers/luadns.py#L98)
* [nsone](https://github.com/AnalogJ/lexicon/blob/8b649338b6c03c7bfb2d465249cd759de62296cb/lexicon/providers/nsone.py#L175)
Can support for this be added?
| lexicon/providers/onapp.py
<|code_start|><|code_end|>
| lexicon/providers/onapp.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from requests.auth import HTTPBasicAuth
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
subparser.description = '''
The OnApp provider requires your OnApp account\'s email address and
API token, which can be found on your /profile page on the Control Panel interface.
The server is your dashboard URL, in the format of e.g. https://dashboard.youronapphost.org'''
subparser.add_argument('--auth-username', help='specify email address of the OnApp account')
subparser.add_argument('--auth-token', help='specify API Key for the OnApp account')
subparser.add_argument('--auth-server', help='specify URL to the OnApp Control Panel Server')
class Provider(BaseProvider):
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.domain_id = None
if not self.options.get('auth_username'):
raise Exception('Error, OnApp Email Address is not defined')
if not self.options.get('auth_token'):
raise Exception('Error, OnApp API Key is not defined')
if not self.options.get('auth_server'):
raise Exception('Error, OnApp Control Panel URL is not defined')
self.session = requests.Session()
def authenticate(self):
domain = self.options.get('domain')
zones = self._get('/dns_zones.json')
for zone in zones:
if zone['dns_zone']['name'] == domain:
self.domain_id = zone['dns_zone']['id']
break
if self.domain_id == None:
raise Exception('Could not find {0} in OnApp DNS Zones'.format(domain))
def create_record(self, type, name, content):
data = {
'name': self._relative_name(name),
'type': type,
self._key_for_record_type(type): content
}
ttl = self.options.get('ttl')
if ttl:
data['ttl'] = "{0}".format(ttl)
result = self._post('/dns_zones/{0}/records.json'.format(self.domain_id), { 'dns_record': data })
logger.debug('create_record: %s', result)
return True
def list_records(self, type=None, name=None, content=None):
records = []
response = self._get('/dns_zones/{0}/records.json'.format(self.domain_id))
for recordType in response['dns_zone']['records']:
# For now we do not support other RR types so we ignore them, also see _key_for_record_type
if recordType not in ('A','AAAA','CNAME','TXT'):
continue
if type and recordType != type:
continue
for record in response['dns_zone']['records'][recordType]:
record = record['dns_record']
if name and record['name'] != self._relative_name(name):
continue
recordContent = record[self._key_for_record_type(recordType)]
if content and recordContent != content:
continue
records.append({
'id': record['id'],
'name': self._full_name(record['name']),
'type': record['type'],
'ttl': record['ttl'],
'content': recordContent
})
logger.debug('list_records: %s', records)
return records
def update_record(self, identifier, type=None, name=None, content=None):
if not identifier:
existing = self._guess_record(type, name)
identifier = existing['id']
ttl = self.options.get('ttl')
if not name or not ttl:
if not existing:
existing = self._get('/dns_zones/{0}/records/{1}.json'.format(self.domain_id, identifier))
if not name:
name = existing['name']
if not ttl:
ttl = existing['ttl']
request = {
'name': self._relative_name(name),
'ttl': '{0}'.format(ttl),
self._key_for_record_type(type): content
}
result = self._put('/dns_zones/{0}/records/{1}.json'.format(self.domain_id, identifier), { 'dns_record': request })
logger.debug('update_record: %s', result)
return True
def delete_record(self, identifier=None, type=None, name=None, content=None):
deletion_ids = []
if not identifier:
records = self.list_records(type, name, content)
deletion_ids = [ record['id'] for record in records ]
else:
deletion_ids.append(identifier)
for id in deletion_ids:
self._delete('/dns_zones/{0}/records/{1}.json'.format(self.domain_id, id))
logger.debug('delete_record: %s', True)
return True
def _request(self, action='GET', url='/', data=None, query_params=None):
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
target = self.options['auth_server'] + url
body = ''
if data is not None:
body = json.dumps(data)
auth = HTTPBasicAuth(self.options['auth_username'], self.options['auth_token'])
request = requests.Request(action, target, data=body, headers=headers, params=query_params, auth=auth)
prepared_request = self.session.prepare_request(request)
result = self.session.send(prepared_request)
result.raise_for_status()
if result.text:
return result.json()
else:
return None
def _key_for_record_type(self, record_type):
if record_type in ('A','AAAA'):
return 'ip'
elif record_type == 'CNAME':
return 'hostname'
elif record_type == 'TXT':
return 'txt'
elif record_type in ('MX','NS', 'SOA', 'SRV', 'LOC'):
raise Exception('{0} record type is not supported in the OnApp Provider'.format(record_type))
def _guess_record(self, type, name=None, content=None):
records = self.list_records(type=type, name=name, content=content)
if len(records) == 1:
return records[0]
elif len(records) > 1:
raise Exception('Identifier was not provided and several existing records match the request for {0}/{1}'.format(type,name))
elif len(records) == 0:
raise Exception('Identifier was not provided and no existing records match the request for {0}/{1}'.format(type,name)) <|code_end|>
|
Return value for list?
I very well might be looking at this wrong, but it appears that out of the box the "executable" that gets installed for linux can't actually get the list results as a return variable, only as a stdout debug message?
| lexicon/__main__.py
<|code_start|>#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import argparse
import importlib
import logging
import os
import sys
import pkg_resources
from .client import Client
#based off https://docs.python.org/2/howto/argparse.html
logger = logging.getLogger(__name__)
def BaseProviderParser():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("action", help="specify the action to take", default='list', choices=['create', 'list', 'update', 'delete'])
parser.add_argument("domain", help="specify the domain, supports subdomains as well")
parser.add_argument("type", help="specify the entry type", default='TXT', choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC'])
parser.add_argument("--name", help="specify the record name")
parser.add_argument("--content", help="specify the record content")
parser.add_argument("--ttl", type=int, help="specify the record time-to-live")
parser.add_argument("--priority", help="specify the record priority")
parser.add_argument("--identifier", help="specify the record for update or delete actions")
parser.add_argument("--log_level", help="specify the log level", default="DEBUG", choices=["CRITICAL","ERROR","WARNING","INFO","DEBUG","NOTSET"])
return parser
def MainParser():
current_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'providers')
providers = [os.path.splitext(f)[0] for f in os.listdir(current_filepath) if os.path.isfile(os.path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
providers = [x for x in providers if not x.startswith('.')]
providers = sorted(providers)
parser = argparse.ArgumentParser(description='Create, Update, Delete, List DNS entries')
try:
version = pkg_resources.get_distribution("dns-lexicon").version
except pkg_resources.DistributionNotFound:
version = 'unknown'
parser.add_argument('--version', help="show the current version of lexicon", action='version', version='%(prog)s {0}'.format(version))
parser.add_argument('--delegated', help="specify the delegated domain")
subparsers = parser.add_subparsers(dest='provider_name', help='specify the DNS provider to use')
subparsers.required = True
for provider in providers:
provider_module = importlib.import_module('lexicon.providers.' + provider)
provider_parser = getattr(provider_module, 'ProviderParser')
subparser = subparsers.add_parser(provider, help='{0} provider'.format(provider), parents=[BaseProviderParser()])
provider_parser(subparser)
return parser
#dynamically determine all the providers available.
def main():
parsed_args = MainParser().parse_args()
log_level = logging.getLevelName(parsed_args.log_level)
logging.basicConfig(stream=sys.stdout, level=log_level, format='%(message)s')
logger.debug('Arguments: %s', parsed_args)
client = Client(parsed_args.__dict__)
client.execute()
if __name__ == '__main__':
main()
<|code_end|>
| lexicon/__main__.py
<|code_start|>#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import argparse
import importlib
import logging
import os
import sys
import json
import pkg_resources
from .client import Client
#based off https://docs.python.org/2/howto/argparse.html
logger = logging.getLogger(__name__)
def BaseProviderParser():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('action', help='specify the action to take', default='list', choices=['create', 'list', 'update', 'delete'])
parser.add_argument('domain', help='specify the domain, supports subdomains as well')
parser.add_argument('type', help='specify the entry type', default='TXT', choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC'])
parser.add_argument('--name', help='specify the record name')
parser.add_argument('--content', help='specify the record content')
parser.add_argument('--ttl', type=int, help='specify the record time-to-live')
parser.add_argument('--priority', help='specify the record priority')
parser.add_argument('--identifier', help='specify the record for update or delete actions')
parser.add_argument('--log_level', help='specify the log level', default='ERROR', choices=['CRITICAL','ERROR','WARNING','INFO','DEBUG','NOTSET'])
parser.add_argument('--output',
help='specify the type of output: by default a formatted table (TABLE), a formatted table without header (TABLE-NO-HEADER), a JSON string (JSON) or no output (QUIET)',
default='TABLE', choices=['TABLE', 'TABLE-NO-HEADER', 'JSON', 'QUIET'])
return parser
def MainParser():
current_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'providers')
providers = [os.path.splitext(f)[0] for f in os.listdir(current_filepath) if os.path.isfile(os.path.join(current_filepath, f))]
providers = list(set(providers))
providers.remove('base')
providers.remove('__init__')
providers = [x for x in providers if not x.startswith('.')]
providers = sorted(providers)
parser = argparse.ArgumentParser(description='Create, Update, Delete, List DNS entries')
try:
version = pkg_resources.get_distribution('dns-lexicon').version
except pkg_resources.DistributionNotFound:
version = 'unknown'
parser.add_argument('--version', help='show the current version of lexicon', action='version', version='%(prog)s {0}'.format(version))
parser.add_argument('--delegated', help='specify the delegated domain')
subparsers = parser.add_subparsers(dest='provider_name', help='specify the DNS provider to use')
subparsers.required = True
for provider in providers:
provider_module = importlib.import_module('lexicon.providers.' + provider)
provider_parser = getattr(provider_module, 'ProviderParser')
subparser = subparsers.add_parser(provider, help='{0} provider'.format(provider), parents=[BaseProviderParser()])
provider_parser(subparser)
return parser
# Convert returned JSON into a nice table for command line usage
def generate_table_result(logger, output=None, without_header=None):
try:
_ = (entry for entry in output)
except:
logger.debug('Command output is not iterable, and then cannot be printed with --quiet parameter not enabled.')
return None
array = [[row['id'], row['type'], row['name'], row['content'], row['ttl']] for row in output]
# Insert header (insert before calculating the max width of each column to take headers size into account)
if not without_header:
headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL']
array.insert(0, headers)
columnWidths = [0, 0, 0, 0, 0]
# Find max width for each column
for row in array:
for idx, col in enumerate(row):
width = len(str(col))
if width > columnWidths[idx]:
columnWidths[idx] = width
# Add a 'nice' separator
if not without_header:
array.insert(1, ['-' * columnWidths[idx] for idx in range(len(columnWidths))])
# Construct table to be printed
table = []
for row in array:
rowList = []
for idx, col in enumerate(row):
rowList.append(str(col).ljust(columnWidths[idx]))
table.append(' '.join(rowList))
# Return table
return '\n'.join(table)
# Print the relevant output for given output_type
def handle_output(results, output_type):
if not output_type == 'QUIET':
if not output_type == 'JSON':
table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER')
if table:
print(table)
else:
try:
_ = (entry for entry in results)
json_str = json.dumps(results)
if json_str:
print(json_str)
except:
logger.debug('Output is not a JSON, and then cannot be printed with --output=JSON parameter.')
pass
# Dynamically determine all the providers available.
def main():
parsed_args = MainParser().parse_args()
log_level = logging.getLevelName(parsed_args.log_level)
logging.basicConfig(stream=sys.stdout, level=log_level, format='%(message)s')
logger.debug('Arguments: %s', parsed_args)
client = Client(vars(parsed_args))
results = client.execute()
handle_output(results, parsed_args.output)
if __name__ == '__main__':
main()
<|code_end|>
|
[Regression on #PR-203] CloudNS authentication broken when `auth-id` supplied
Provider CloudNS fails to authenticate when using auth-id for authentication, rather than auth-subid or auth-subuser.
Line 146 in lexicon/providers/cloudns.py reads `if self.is_given_option(self.options['auth_id']):`. It should instead should read `if self.is_given_option('auth_id'):`.
I've inlined a patch below which fixes this. Sorry if this is more effort for you than a PR.
--- cloudns.py
+++ cloudns.pr203fix.py
@@ -143,7 +143,7 @@
if not self.options['auth_password']:
raise Exception('No valid authentication data passed, expected: auth-password')
- if self._is_given_option(self.options['auth_id']):
+ if self._is_given_option('auth_id'):
return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}
elif self._is_given_option('auth_subid'):
return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}
| lexicon/providers/cloudns.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
identity_group = subparser.add_mutually_exclusive_group()
identity_group.add_argument("--auth-id", help="specify user id used to authenticate")
identity_group.add_argument("--auth-subid", help="specify subuser id used to authenticate")
identity_group.add_argument("--auth-subuser", help="specify subuser name used to authenticate")
subparser.add_argument("--auth-password", help="specify password used to authenticate")
subparser.add_argument("--weight", help="specify the SRV record weight")
subparser.add_argument("--port", help="specify the SRV record port")
class Provider(BaseProvider):
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.domain_id = None
self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.cloudns.net')
def authenticate(self):
payload = self._get('/dns/get-zone-info.json', {'domain-name': self.options['domain']})
self.domain_id = payload['name']
logger.debug('authenticate: %s', payload)
def create_record(self, type, name, content):
# Skip execution if such a record already exists
existing_records = self.list_records(type, name, content)
if len(existing_records) > 0:
return True
# Build parameters for adding a new record
params = {
'domain-name': self.domain_id,
'record-type': type,
'host': self._relative_name(name),
'record': content
}
if self.options['ttl']:
params['ttl'] = self.options['ttl']
if self.options['priority']:
params['priority'] = self.options['priority']
if self.options['weight']:
params['weight'] = self.options['weight']
if self.options['port']:
params['port'] = self.options['port']
# Add new record by calling the ClouDNS API
payload = self._post('/dns/add-record.json', params)
logger.debug('create_record: %s', payload)
# Error handling is already covered by self._request
return True
def list_records(self, type=None, name=None, content=None):
# Build parameters to make use of the built-in API filtering
params = {'domain-name': self.domain_id}
if type:
params['type'] = type
if name:
params['host'] = self._relative_name(name)
# Fetch and parse all records for the given zone
payload = self._get('/dns/records.json', params)
payload = payload if not isinstance(payload, list) else {}
records = []
for record in payload.values():
records.append({
'type': record['type'],
'name': self._full_name(record['host']),
'ttl': record['ttl'],
'content': record['record'],
'id': record['id']
})
# Filter by content manually as API does not support that
if content:
records = [record for record in records if record['content'] == content]
# Print records as debug output and return them
logger.debug('list_records: %s', records)
return records
def update_record(self, identifier, type=None, name=None, content=None):
# Try to find record if no identifier was specified
if not identifier:
identifier = self._find_record_identifier(type, name, None)
# Build parameters for updating an existing record
params = {'domain-name': self.domain_id, 'record-id': identifier}
if name:
params['host'] = self._relative_name(name)
if content:
params['record'] = content
if self.options.get('ttl'):
params['ttl'] = self.options.get('ttl')
if self.options['priority']:
params['priority'] = self.options['priority']
if self.options['weight']:
params['weight'] = self.options['weight']
if self.options['port']:
params['port'] = self.options['port']
# Update existing record by calling the ClouDNS API
payload = self._post('/dns/mod-record.json', params)
logger.debug('update_record: %s', payload)
# Error handling is already covered by self._request
return True
def delete_record(self, identifier=None, type=None, name=None, content=None):
# Try to find record if no identifier was specified
delete_record_id = []
if not identifier:
records = self.list_records(type, name, content)
delete_record_id = [record['id'] for record in records]
else:
delete_record_id.append(identifier)
logger.debug('delete_records: %s', delete_record_id)
for record_id in delete_record_id:
# Delete existing record by calling the ClouDNS API
payload = self._post('/dns/delete-record.json', {'domain-name': self.domain_id, 'record-id': record_id})
logger.debug('delete_record: %s', True)
# Error handling is already covered by self._request
return True
def _is_given_option(self, key):
fallback_fn = self.engine_overrides.get('fallbackFn', (lambda x: None))
return self.options[key] and self.options[key] != fallback_fn(key)
def _build_authentication_data(self):
if not self.options['auth_password']:
raise Exception('No valid authentication data passed, expected: auth-password')
if self._is_given_option(self.options['auth_id']):
return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}
elif self._is_given_option('auth_subid'):
return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}
elif self._is_given_option('auth_subuser'):
return {'sub-auth-user': self.options['auth_subuser'], 'auth-password': self.options['auth_password']}
elif self.options['auth_id'] or self.options['auth_subid'] or self.options['auth_subuser']:
# All the options were passed with a fallback value, return an empty dictionary.
return {}
else:
raise Exception('No valid authentication data passed, expected: auth-id, auth-subid, auth-subuser')
def _find_record_identifier(self, type, name, content):
records = self.list_records(type, name, content)
logger.debug('records: %s', records)
if len(records) == 1:
return records[0]['id']
else:
raise Exception('Record identifier could not be found.')
def _request(self, action='GET', url='/', data=None, query_params=None):
# Set default values for missing arguments
data = data if data else {}
query_params = query_params if query_params else {}
# Merge authentication data into request
if action == 'GET':
query_params.update(self._build_authentication_data())
else:
data.update(self._build_authentication_data())
# Fire request against ClouDNS API and parse result as JSON
r = requests.request(action, self.api_endpoint + url, params=query_params, data=data)
r.raise_for_status()
payload = r.json()
# Check ClouDNS specific status code and description
if 'status' in payload and 'statusDescription' in payload and payload['status'] != 'Success':
raise Exception('ClouDNS API request has failed: ' + payload['statusDescription'])
# Return payload
return payload
<|code_end|>
| lexicon/providers/cloudns.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
identity_group = subparser.add_mutually_exclusive_group()
identity_group.add_argument("--auth-id", help="specify user id used to authenticate")
identity_group.add_argument("--auth-subid", help="specify subuser id used to authenticate")
identity_group.add_argument("--auth-subuser", help="specify subuser name used to authenticate")
subparser.add_argument("--auth-password", help="specify password used to authenticate")
subparser.add_argument("--weight", help="specify the SRV record weight")
subparser.add_argument("--port", help="specify the SRV record port")
class Provider(BaseProvider):
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.domain_id = None
self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.cloudns.net')
def authenticate(self):
payload = self._get('/dns/get-zone-info.json', {'domain-name': self.options['domain']})
self.domain_id = payload['name']
logger.debug('authenticate: %s', payload)
def create_record(self, type, name, content):
# Skip execution if such a record already exists
existing_records = self.list_records(type, name, content)
if len(existing_records) > 0:
return True
# Build parameters for adding a new record
params = {
'domain-name': self.domain_id,
'record-type': type,
'host': self._relative_name(name),
'record': content
}
if self.options['ttl']:
params['ttl'] = self.options['ttl']
if self.options['priority']:
params['priority'] = self.options['priority']
if self.options['weight']:
params['weight'] = self.options['weight']
if self.options['port']:
params['port'] = self.options['port']
# Add new record by calling the ClouDNS API
payload = self._post('/dns/add-record.json', params)
logger.debug('create_record: %s', payload)
# Error handling is already covered by self._request
return True
def list_records(self, type=None, name=None, content=None):
# Build parameters to make use of the built-in API filtering
params = {'domain-name': self.domain_id}
if type:
params['type'] = type
if name:
params['host'] = self._relative_name(name)
# Fetch and parse all records for the given zone
payload = self._get('/dns/records.json', params)
payload = payload if not isinstance(payload, list) else {}
records = []
for record in payload.values():
records.append({
'type': record['type'],
'name': self._full_name(record['host']),
'ttl': record['ttl'],
'content': record['record'],
'id': record['id']
})
# Filter by content manually as API does not support that
if content:
records = [record for record in records if record['content'] == content]
# Print records as debug output and return them
logger.debug('list_records: %s', records)
return records
def update_record(self, identifier, type=None, name=None, content=None):
"""Update an existing record; when *identifier* is omitted it is resolved from type/name (which must match exactly one record). Configured ttl/priority/weight/port options are forwarded."""
# Try to find record if no identifier was specified
if not identifier:
identifier = self._find_record_identifier(type, name, None)
# Build parameters for updating an existing record
params = {'domain-name': self.domain_id, 'record-id': identifier}
if name:
params['host'] = self._relative_name(name)
if content:
params['record'] = content
if self.options.get('ttl'):
params['ttl'] = self.options.get('ttl')
if self.options['priority']:
params['priority'] = self.options['priority']
if self.options['weight']:
params['weight'] = self.options['weight']
if self.options['port']:
params['port'] = self.options['port']
# Update existing record by calling the ClouDNS API
payload = self._post('/dns/mod-record.json', params)
logger.debug('update_record: %s', payload)
# Error handling is already covered by self._request
return True
def delete_record(self, identifier=None, type=None, name=None, content=None):
"""Delete records either by explicit *identifier* or by the type/name/content filters (which may match several records)."""
# Try to find record if no identifier was specified
delete_record_id = []
if not identifier:
records = self.list_records(type, name, content)
delete_record_id = [record['id'] for record in records]
else:
delete_record_id.append(identifier)
logger.debug('delete_records: %s', delete_record_id)
for record_id in delete_record_id:
# Delete existing record by calling the ClouDNS API
payload = self._post('/dns/delete-record.json', {'domain-name': self.domain_id, 'record-id': record_id})
logger.debug('delete_record: %s', True)
# Error handling is already covered by self._request
# NOTE(review): indentation was lost in this dump, so it is ambiguous whether 'return True' sits inside the loop; confirm against the original file that it runs only after all ids are deleted.
return True
def _is_given_option(self, key):
"""Return True when option *key* was explicitly provided: it is truthy and differs from the engine's fallback value for that key."""
fallback_fn = self.engine_overrides.get('fallbackFn', (lambda x: None))
return self.options[key] and self.options[key] != fallback_fn(key)
def _build_authentication_data(self):
"""Assemble ClouDNS auth parameters; credential precedence is auth_id, then auth_subid, then auth_subuser, each paired with auth_password. Raises when no usable credentials were provided."""
if not self.options['auth_password']:
raise Exception('No valid authentication data passed, expected: auth-password')
if self._is_given_option('auth_id'):
return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}
elif self._is_given_option('auth_subid'):
return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}
elif self._is_given_option('auth_subuser'):
return {'sub-auth-user': self.options['auth_subuser'], 'auth-password': self.options['auth_password']}
elif self.options['auth_id'] or self.options['auth_subid'] or self.options['auth_subuser']:
# All the options were passed with a fallback value, return an empty dictionary.
return {}
else:
raise Exception('No valid authentication data passed, expected: auth-id, auth-subid, auth-subuser')
def _find_record_identifier(self, type, name, content):
"""Return the id of the single record matching the filters; raises unless exactly one record matches."""
records = self.list_records(type, name, content)
logger.debug('records: %s', records)
if len(records) == 1:
return records[0]['id']
else:
raise Exception('Record identifier could not be found.')
def _request(self, action='GET', url='/', data=None, query_params=None):
"""Perform a ClouDNS API call. Auth data travels in the query string for GET and in the request body otherwise; raises for HTTP errors and for a ClouDNS status other than 'Success'."""
# Set default values for missing arguments
data = data if data else {}
query_params = query_params if query_params else {}
# Merge authentication data into request
if action == 'GET':
query_params.update(self._build_authentication_data())
else:
data.update(self._build_authentication_data())
# Fire request against ClouDNS API and parse result as JSON
r = requests.request(action, self.api_endpoint + url, params=query_params, data=data)
r.raise_for_status()
payload = r.json()
# Check ClouDNS specific status code and description
if 'status' in payload and 'statusDescription' in payload and payload['status'] != 'Success':
raise Exception('ClouDNS API request has failed: ' + payload['statusDescription'])
# Return payload
return payload
<|code_end|>
|
API change of online.net broke the lexicon plugin
Hi,
Blame them! I'm working on it...
| lexicon/providers/online.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
"""Register online.net-specific CLI arguments on the lexicon subparser."""
subparser.add_argument("--auth-token", help="specify private api token")
def to_data(type, content):
    """Format *content* for the online.net API: TXT values are wrapped in double quotes, all other record types pass through unchanged."""
    return '"{0}"'.format(content) if type == "TXT" else content
class Provider(BaseProvider):
"""Lexicon provider for online.net. Record changes are staged in a passive zone version (a bind-format copy of the active zone) and applied atomically by enabling that version."""
def __init__(self, options, engine_overrides=None):
"""Set up zone bookkeeping; two alternating zone versions ('A'/'B') are swapped between active and passive roles."""
super(Provider, self).__init__(options, engine_overrides)
self.zone_name = 'Zone Automatic Lexicon '
self.domain_id = None
self.passive_zone = None
self.active_zone = None
self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.online.net/api/v1')
def authenticate(self):
"""Look the configured domain up in /domain/ and keep its id; raises when the account does not manage the domain."""
payload = self._get('/domain/')
domain = self.options['domain']
did = None
for row in payload:
if row['name'] == domain:
did = row['id']
break
if did is None:
raise Exception('No domain found')
self.domain_id = did
self.init_zones()
def list_zones(self):
"""Return all zone versions known for the domain."""
return self._get('/domain/{0}/version'.format(self.domain_id))
def init_zones(self):
"""Locate the active zone version and a lexicon-managed passive one, creating the passive version when missing, then sync it from the active zone."""
# sets current zone version
zone_name_a = self.zone_name + 'A'
zone_name_b = self.zone_name + 'B'
active_row = None
passive_row = None
for row in self.list_zones():
if row['active'] == True:
active_row = row
elif row['name'] == zone_name_a or row['name'] == zone_name_b:
passive_row = row
if passive_row is None:
# Alternate between the A and B names so the new passive version never collides with the currently active one.
passive_row = self._post('/domain/{0}/version'.format(self.domain_id), {
'name': zone_name_b if active_row['name'] == zone_name_a else zone_name_a
})
self.active_zone = active_row['uuid_ref']
self.passive_zone = passive_row['uuid_ref']
self.update_passive_zone()
def update_passive_zone(self):
"""Overwrite the passive zone version with a bind-format dump of the active zone."""
self._put(
'/domain/{0}/version/{1}/zone_from_bind'.format(
self.domain_id,
self.passive_zone
),
self.get_bind_zone()
)
def get_bind_zone(self):
"""Serialize the active zone's records into bind zone-file text."""
records = self.list_zone_records(self.active_zone)
# then convert records to bind format
bindStr = ''
for record in records:
bindStr = bindStr + '{0} {1} IN {2} {3}{4}\n'.format(
record['name'] or '@',
record['ttl'],
record['type'],
'{0} '.format(record['aux']) if 'aux' in record else '',
record['data'] or ''
)
return bindStr
def enable_zone(self):
"""Promote the passive zone version to active (applying staged edits), swap the role bookkeeping, and re-sync the new passive copy."""
zone = self.passive_zone
if zone is None:
raise Exception("Could not enable uninitialized passive_zone")
payload = self._patch('/domain/{0}/version/{1}/enable'.format(
self.domain_id,
zone
))
self.passive_zone = self.active_zone
self.active_zone = zone
self.update_passive_zone()
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
"""Stage a new record in the passive zone and enable it; no-op (True) when an identical record exists, False on API errors."""
try:
record = self.find_record(type, name, content)
if record is not None:
return True
record = {
'name': self._fqdn_name(name),
'type': type,
'data': to_data(type, content),
'priority': self.options['priority'] or '',
'ttl': self.options['ttl'] or ''
}
payload = self._post(
'/domain/{0}/version/{1}/zone'.format(
self.domain_id,
self.passive_zone
),
record
)
except Exception as e:
logger.debug(e)
return False
self.enable_zone()
logger.debug('create_record: %s', True)
return True
def find_zone_records(self, zone, type=None, name=None, content=None):
"""List the records of *zone* normalized to lexicon's shape, optionally filtered by type, name, and content."""
records = []
for record in self.list_zone_records(zone):
processed_record = {
'id': record['id'],
'type': record['type'],
'name': self._full_name(record['name']),
'ttl': record['ttl'],
'content': record['data'],
'priority': record['aux'] if 'aux' in record else ''
}
records.append(self._clean_TXT_record(processed_record))
if type:
records = [record for record in records if record['type'] == type]
if name:
fullName = self._full_name(name)
records = [record for record in records if record['name'] == fullName]
if content:
records = [record for record in records if record['content'] == content]
logger.debug('list_records: %s', records)
return records
def list_zone_records(self, zone_id):
"""Fetch the raw record list of a zone version from the API."""
return self._get('/domain/{0}/version/{1}/zone'.format(self.domain_id, zone_id))
def list_records(self, type=None, name=None, content=None):
"""List records of the passive (staging) zone version."""
return self.find_zone_records(self.passive_zone, type, name, content)
def find_record(self, type=None, name=None, content=None):
"""Return the first matching record, or None when nothing matches."""
record = None
records = self.list_records(type, name, content)
if len(records) < 1:
return None
else:
return records[0]
# Create or update a record.
def update_record(self, id, type=None, name=None, content=None):
"""Patch an existing record in the passive zone and enable it; returns True when no matching record is found (nothing to update), False on API errors."""
record = self.find_record(type, name)
if record is None:
logger.debug("cannot find record to update: %s %s %s", id, type, name)
return True
if type:
record['type'] = type
if name:
record['name'] = self._fqdn_name(name)
if content:
record['data'] = to_data(type, content)
if self.options.get('ttl'):
record['ttl'] = self.options.get('ttl')
# it is weird that 'aux' becomes 'priority' in online's api
if self.options['priority']:
record['priority'] = self.options['priority']
if id is None:
id = record['id']
record.pop('id')
try:
payload = self._patch('/domain/{0}/version/{1}/zone/{2}'.format(
self.domain_id,
self.passive_zone,
id
), record)
except Exception as e:
logger.debug(e)
return False
self.enable_zone()
# If it didn't raise from the http status code, then we're good
logger.debug('update_record: %s', id)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, id=None, type=None, name=None, content=None):
"""Delete every record matching the filters from the passive zone, then enable it; returns False when nothing matched or the API errored."""
records = self.list_records(type, name, content)
if len(records) == 0:
logger.debug("Cannot find records %s %s %s", type, name, content)
return False
logger.debug('delete_records: %s records found', len(records))
try:
for record in records:
payload = self._delete('/domain/{0}/version/{1}/zone/{2}'.format(
self.domain_id,
self.passive_zone,
record['id']
))
except Exception as e:
logger.debug(e)
return False
self.enable_zone()
# is always True at this point, if a non 200 response is returned an error is raised.
logger.debug('delete_record: %s', True)
return True
def _patch(self, url='/', data=None, query_params=None):
"""Convenience wrapper for HTTP PATCH requests."""
return self._request('PATCH', url, data=data, query_params=query_params)
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
"""Issue an authenticated request; str payloads are sent as text/plain (bind zone data), everything else is JSON-encoded. Raises on HTTP errors."""
if query_params is None:
query_params = {}
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer {0}'.format(self.options['auth_token'])
}
if data is not None:
if type(data) is str:
headers['Content-Type'] = 'text/plain';
else:
headers['Content-Type'] = 'application/json';
data = json.dumps(data)
r = requests.request(
action,
self.api_endpoint + url,
params=query_params,
data=data,
headers=headers
)
r.raise_for_status() # if the request fails for any reason, throw an error.
return r.text and r.json() or ''
<|code_end|>
| lexicon/providers/online.py
<|code_start|>from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
"""Register online.net-specific CLI arguments on the lexicon subparser."""
subparser.add_argument("--auth-token", help="specify private api token")
def to_data(type, content):
    """Format *content* for the online.net API: TXT values are wrapped in double quotes, all other record types pass through unchanged."""
    return '"{0}"'.format(content) if type == "TXT" else content
class Provider(BaseProvider):
"""Lexicon provider for online.net. Record changes are staged in a passive zone version (a bind-format copy of the active zone) and applied atomically by enabling that version."""
def __init__(self, options, engine_overrides=None):
"""Set up zone bookkeeping. The domain name itself serves as the API's domain identifier (all endpoints below address the domain by name), so no id lookup is performed."""
super(Provider, self).__init__(options, engine_overrides)
self.zone_name = 'Zone Automatic Lexicon '
self.passive_zone = None
self.active_zone = None
self.domain_id = self.options['domain']
self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.online.net/api/v1')
def authenticate(self):
"""Initialize the active/passive zone versions; no separate domain-id lookup is needed since the domain name is used directly."""
self.init_zones()
def list_zones(self):
"""Return all zone versions known for the domain."""
return self._get('/domain/{0}/version'.format(self.domain_id))
def init_zones(self):
"""Locate the active zone version and a lexicon-managed passive one, creating the passive version when missing, then sync it from the active zone."""
# sets current zone version
zone_name_a = self.zone_name + 'A'
zone_name_b = self.zone_name + 'B'
active_row = None
passive_row = None
for row in self.list_zones():
if row['active'] == True:
active_row = row
elif row['name'] == zone_name_a or row['name'] == zone_name_b:
passive_row = row
if passive_row is None:
# Alternate between the A and B names so the new passive version never collides with the currently active one.
passive_row = self._post('/domain/{0}/version'.format(self.domain_id), {
'name': zone_name_b if active_row['name'] == zone_name_a else zone_name_a
})
self.active_zone = active_row['uuid_ref']
self.passive_zone = passive_row['uuid_ref']
self.update_passive_zone()
def update_passive_zone(self):
"""Overwrite the passive zone version with a bind-format dump of the active zone."""
self._put(
'/domain/{0}/version/{1}/zone_from_bind'.format(
self.domain_id,
self.passive_zone
),
self.get_bind_zone()
)
def get_bind_zone(self):
"""Serialize the active zone's records into bind zone-file text."""
records = self.list_zone_records(self.active_zone)
# then convert records to bind format
bindStr = ''
for record in records:
bindStr = bindStr + '{0} {1} IN {2} {3}{4}\n'.format(
record['name'] or '@',
record['ttl'],
record['type'],
'{0} '.format(record['aux']) if 'aux' in record else '',
record['data'] or ''
)
return bindStr
def enable_zone(self):
"""Promote the passive zone version to active (applying staged edits), swap the role bookkeeping, and re-sync the new passive copy."""
zone = self.passive_zone
if zone is None:
raise Exception("Could not enable uninitialized passive_zone")
payload = self._patch('/domain/{0}/version/{1}/enable'.format(
self.domain_id,
zone
))
self.passive_zone = self.active_zone
self.active_zone = zone
self.update_passive_zone()
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
"""Stage a new record in the passive zone and enable it; no-op (True) when an identical record exists, False on API errors."""
try:
record = self.find_record(type, name, content)
if record is not None:
return True
record = {
'name': self._fqdn_name(name),
'type': type,
'data': to_data(type, content),
'priority': self.options['priority'] or '',
'ttl': self.options['ttl'] or ''
}
payload = self._post(
'/domain/{0}/version/{1}/zone'.format(
self.domain_id,
self.passive_zone
),
record
)
except Exception as e:
logger.debug(e)
return False
self.enable_zone()
logger.debug('create_record: %s', True)
return True
def find_zone_records(self, zone, type=None, name=None, content=None):
"""List the records of *zone* normalized to lexicon's shape, optionally filtered by type, name, and content."""
records = []
for record in self.list_zone_records(zone):
processed_record = {
'id': record['id'],
'type': record['type'],
'name': self._full_name(record['name']),
'ttl': record['ttl'],
'content': record['data'],
'priority': record['aux'] if 'aux' in record else ''
}
records.append(self._clean_TXT_record(processed_record))
if type:
records = [record for record in records if record['type'] == type]
if name:
fullName = self._full_name(name)
records = [record for record in records if record['name'] == fullName]
if content:
records = [record for record in records if record['content'] == content]
logger.debug('list_records: %s', records)
return records
def list_zone_records(self, zone_id):
"""Fetch the raw record list of a zone version from the API."""
return self._get('/domain/{0}/version/{1}/zone'.format(self.domain_id, zone_id))
def list_records(self, type=None, name=None, content=None):
"""List records of the passive (staging) zone version."""
return self.find_zone_records(self.passive_zone, type, name, content)
def find_record(self, type=None, name=None, content=None):
"""Return the first matching record, or None when nothing matches."""
record = None
records = self.list_records(type, name, content)
if len(records) < 1:
return None
else:
return records[0]
# Create or update a record.
def update_record(self, id, type=None, name=None, content=None):
"""Patch an existing record in the passive zone and enable it; returns True when no matching record is found (nothing to update), False on API errors."""
record = self.find_record(type, name)
if record is None:
logger.debug("cannot find record to update: %s %s %s", id, type, name)
return True
if type:
record['type'] = type
if name:
record['name'] = self._fqdn_name(name)
if content:
record['data'] = to_data(type, content)
if self.options.get('ttl'):
record['ttl'] = self.options.get('ttl')
# it is weird that 'aux' becomes 'priority' in online's api
if self.options['priority']:
record['priority'] = self.options['priority']
if id is None:
id = record['id']
record.pop('id')
try:
payload = self._patch('/domain/{0}/version/{1}/zone/{2}'.format(
self.domain_id,
self.passive_zone,
id
), record)
except Exception as e:
logger.debug(e)
return False
self.enable_zone()
# If it didn't raise from the http status code, then we're good
logger.debug('update_record: %s', id)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, id=None, type=None, name=None, content=None):
"""Delete every record matching the filters from the passive zone, then enable it; returns False when nothing matched or the API errored."""
records = self.list_records(type, name, content)
if len(records) == 0:
logger.debug("Cannot find records %s %s %s", type, name, content)
return False
logger.debug('delete_records: %s records found', len(records))
try:
for record in records:
payload = self._delete('/domain/{0}/version/{1}/zone/{2}'.format(
self.domain_id,
self.passive_zone,
record['id']
))
except Exception as e:
logger.debug(e)
return False
self.enable_zone()
# is always True at this point, if a non 200 response is returned an error is raised.
logger.debug('delete_record: %s', True)
return True
def _patch(self, url='/', data=None, query_params=None):
"""Convenience wrapper for HTTP PATCH requests."""
return self._request('PATCH', url, data=data, query_params=query_params)
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
"""Issue an authenticated request; str payloads are sent as text/plain (bind zone data), everything else is JSON-encoded. Raises on HTTP errors."""
if query_params is None:
query_params = {}
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer {0}'.format(self.options['auth_token'])
}
if data is not None:
if type(data) is str:
headers['Content-Type'] = 'text/plain';
else:
headers['Content-Type'] = 'application/json';
data = json.dumps(data)
r = requests.request(
action,
self.api_endpoint + url,
params=query_params,
data=data,
headers=headers
)
r.raise_for_status() # if the request fails for any reason, throw an error.
return r.text and r.json() or ''
<|code_end|>
|
test_hetzner tests mostly fail (without internet access?)
When running builds of packages for openSUSE, all builds run in an isolated environment without a network connection. Perhaps that is the reason why most ``test_hetzner`` tests fail (all others pass).
```
[ 6s] + py.test3 tests -v -k test_hetzner
[ 6s] ============================= test session starts ==============================
[ 6s] platform linux -- Python 3.6.5, pytest-3.10.1, py-1.7.0, pluggy-0.8.0 -- /usr/bin/python3
[ 6s] cachedir: .pytest_cache
[ 6s] rootdir: /home/abuild/rpmbuild/BUILD/lexicon-3.0.7, inifile:
[ 6s] plugins: xdist-1.23.2, forked-0.2, cov-2.6.0
[ 10s] collecting ... collected 1518 items / 1460 deselected
[ 10s]
[ 41s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_authenticate FAILED [ 1%]
[ 71s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_authenticate_with_unmanaged_domain_should_fail PASSED [ 3%]
[ 101s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_create_record_for_A_with_valid_name_and_content FAILED [ 5%]
[ 131s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content FAILED [ 6%]
[ 162s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content FAILED [ 8%]
[ 192s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content FAILED [ 10%]
[ 222s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content FAILED [ 12%]
[ 253s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_create_record_multiple_times_should_create_record_set FAILED [ 13%]
[ 283s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop FAILED [ 15%]
[ 313s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_delete_record_by_filter_should_remove_record FAILED [ 17%]
[ 344s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record FAILED [ 18%]
[ 374s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record FAILED [ 20%]
[ 404s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_delete_record_by_identifier_should_remove_record FAILED [ 22%]
[ 435s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched FAILED [ 24%]
[ 465s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_delete_record_with_record_set_name_remove_all FAILED [ 25%]
[ 496s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_after_setting_ttl FAILED [ 27%]
[ 526s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_should_handle_record_sets FAILED [ 29%]
[ 526s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_should_return_empty_list_if_no_records_found SKIPPED [ 31%]
[ 526s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_with_arguments_should_filter_list SKIPPED [ 32%]
[ 556s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record FAILED [ 34%]
[ 587s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_with_full_name_filter_should_return_record FAILED [ 36%]
[ 617s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list FAILED [ 37%]
[ 647s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_with_name_filter_should_return_record FAILED [ 39%]
[ 677s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_list_records_with_no_arguments_should_list_all FAILED [ 41%]
[ 708s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_update_record_should_modify_record FAILED [ 43%]
[ 738s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_update_record_should_modify_record_name_specified FAILED [ 44%]
[ 768s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record FAILED [ 46%]
[ 799s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_Provider_when_calling_update_record_with_full_name_should_modify_record FAILED [ 48%]
[ 799s] tests/providers/test_hetzner.py::HetznerRobotProviderTests::test_provider_module_shape PASSED [ 50%]
[ 829s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_authenticate FAILED [ 51%]
[ 859s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_authenticate_with_unmanaged_domain_should_fail PASSED [ 53%]
[ 889s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_create_record_for_A_with_valid_name_and_content FAILED [ 55%]
[ 920s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content FAILED [ 56%]
[ 950s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content FAILED [ 58%]
[ 980s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content FAILED [ 60%]
[ 1010s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content FAILED [ 62%]
[ 1041s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_create_record_multiple_times_should_create_record_set FAILED [ 63%]
[ 1071s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop FAILED [ 65%]
[ 1101s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_delete_record_by_filter_should_remove_record FAILED [ 67%]
[ 1132s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record FAILED [ 68%]
[ 1162s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record FAILED [ 70%]
[ 1192s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_delete_record_by_identifier_should_remove_record FAILED [ 72%]
[ 1223s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched FAILED [ 74%]
[ 1253s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_delete_record_with_record_set_name_remove_all FAILED [ 75%]
[ 1283s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_after_setting_ttl FAILED [ 77%]
[ 1314s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_should_handle_record_sets FAILED [ 79%]
[ 1314s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_should_return_empty_list_if_no_records_found SKIPPED [ 81%]
[ 1314s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_with_arguments_should_filter_list SKIPPED [ 82%]
[ 1344s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record FAILED [ 84%]
[ 1374s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_with_full_name_filter_should_return_record FAILED [ 86%]
[ 1405s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list FAILED [ 87%]
[ 1435s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_with_name_filter_should_return_record FAILED [ 89%]
[ 1465s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_list_records_with_no_arguments_should_list_all FAILED [ 91%]
[ 1496s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_update_record_should_modify_record FAILED [ 93%]
[ 1526s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_update_record_should_modify_record_name_specified FAILED [ 94%]
[ 1556s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record FAILED [ 96%]
[ 1587s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_Provider_when_calling_update_record_with_full_name_should_modify_record FAILED [ 98%]
[ 1587s] tests/providers/test_hetzner.py::HetznerKonsoleHProviderTests::test_provider_module_shape PASSED [100%]
[ 1587s]
[ 1587s] =================================== FAILURES ===================================
[ 1587s] _____________ HetznerRobotProviderTests.test_Provider_authenticate _____________
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_authenticate>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_authenticate(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:120:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226623.6936848
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001739263534546 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_create_record_for_A_with_valid_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_create_record_for_A_with_valid_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_A_with_valid_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:137:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226684.1878257
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001224994659424 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:142:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226714.5355732
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001864194869995 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:159:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226744.8239799
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00143837928772 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:153:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226775.1904507
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.002041339874268 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:147:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226805.4625132
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00130319595337 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_create_record_multiple_times_should_create_record_set
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_create_record_multiple_times_should_create_record_set>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_multiple_times_should_create_record_set(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:329:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226835.7801673
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001408338546753 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:317:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226866.2151842
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001322269439697 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_delete_record_by_filter_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_delete_record_by_filter_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_filter_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:280:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226896.5229275
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001464366912842 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:301:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226926.8393946
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00216007232666 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:290:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226957.2404466
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00147795677185 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_delete_record_by_identifier_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_delete_record_by_identifier_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_identifier_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:271:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545226987.6605637
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001694917678833 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:373:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227018.060289
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001522302627563 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_delete_record_with_record_set_name_remove_all
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_delete_record_with_record_set_name_remove_all>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_with_record_set_name_remove_all(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:358:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227048.4098914
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00169563293457 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_list_records_after_setting_ttl
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_list_records_after_setting_ttl>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_after_setting_ttl(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:207:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227078.7399654
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001705169677734 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_list_records_should_handle_record_sets
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_list_records_should_handle_record_sets>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_should_handle_record_sets(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:346:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227109.118544
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.000815868377686 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:195:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227139.438761
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00140929222107 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_list_records_with_full_name_filter_should_return_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_list_records_with_full_name_filter_should_return_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_full_name_filter_should_return_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:183:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227169.791035
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00222635269165 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:338:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227200.072255
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001331090927124 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_list_records_with_name_filter_should_return_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_list_records_with_name_filter_should_return_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_name_filter_should_return_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:173:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227230.3420138
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00171971321106 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_list_records_with_no_arguments_should_list_all
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_list_records_with_no_arguments_should_list_all>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_no_arguments_should_list_all(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:168:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227260.6709635
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001699209213257 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_update_record_should_modify_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_update_record_should_modify_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_should_modify_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:232:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227290.9411821
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.002132654190063 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_update_record_should_modify_record_name_specified
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_update_record_should_modify_record_name_specified>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_should_modify_record_name_specified(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:240:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227321.2468865
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00109076499939 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:258:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227351.6156437
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00177574157715 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerRobotProviderTests.test_Provider_when_calling_update_record_with_full_name_should_modify_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerRobotProviderTests testMethod=test_Provider_when_calling_update_record_with_full_name_should_modify_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_with_full_name_should_modify_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:248:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227381.9297016
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001366138458252 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] ___________ HetznerKonsoleHProviderTests.test_Provider_authenticate ____________
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_authenticate>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_authenticate(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:120:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227412.1908543
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00184440612793 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_create_record_for_A_with_valid_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_create_record_for_A_with_valid_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_A_with_valid_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:137:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227472.5597498
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001546382904053 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_CNAME_with_valid_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:142:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227502.8352175
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00182032585144 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_TXT_with_fqdn_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:159:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227533.1063695
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00209331512451 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_TXT_with_full_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:153:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227563.4163032
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.002171754837036 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_for_TXT_with_valid_name_and_content(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:147:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227593.6919627
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001668691635132 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_create_record_multiple_times_should_create_record_set
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_create_record_multiple_times_should_create_record_set>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_multiple_times_should_create_record_set(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:329:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227623.9842434
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00209879875183 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_create_record_with_duplicate_records_should_be_noop(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:317:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227654.274271
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001965522766113 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_delete_record_by_filter_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_delete_record_by_filter_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_filter_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:280:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227684.612756
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.002180337905884 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_filter_with_fqdn_name_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:301:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227714.911293
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001580953598022 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_filter_with_full_name_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:290:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227745.2531846
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00243043899536 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_delete_record_by_identifier_should_remove_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_delete_record_by_identifier_should_remove_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_by_identifier_should_remove_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:271:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227775.5630276
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.0020112991333 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_with_record_set_by_content_should_leave_others_untouched(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:373:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227805.8652472
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001646280288696 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_delete_record_with_record_set_name_remove_all
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_delete_record_with_record_set_name_remove_all>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_delete_record_with_record_set_name_remove_all(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:358:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227836.2682204
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001182317733765 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_list_records_after_setting_ttl
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_list_records_after_setting_ttl>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_after_setting_ttl(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:207:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227866.6054685
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.000760555267334 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_list_records_should_handle_record_sets
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_list_records_should_handle_record_sets>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_should_handle_record_sets(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:346:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227896.9345942
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.000595569610596 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_fqdn_name_filter_should_return_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:195:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227927.345853
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.002305269241333 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_list_records_with_full_name_filter_should_return_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_list_records_with_full_name_filter_should_return_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_full_name_filter_should_return_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:183:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227957.6347275
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.000818967819214 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list>
[ 1587s]
[ 1587s] @pytest.mark.ext_suite_1
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_invalid_filter_should_be_empty_list(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:338:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545227988.0226264
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00152277946472 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_list_records_with_name_filter_should_return_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_list_records_with_name_filter_should_return_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_name_filter_should_return_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:173:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545228018.3699582
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001624584197998 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_list_records_with_no_arguments_should_list_all
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_list_records_with_no_arguments_should_list_all>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_list_records_with_no_arguments_should_list_all(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:168:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545228048.6449227
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.000717878341675 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_update_record_should_modify_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_update_record_should_modify_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_should_modify_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:232:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545228079.003393
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001289129257202 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_update_record_should_modify_record_name_specified
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_update_record_should_modify_record_name_specified>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_should_modify_record_name_specified(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:240:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545228109.3683515
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.001031160354614 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_with_fqdn_name_should_modify_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:258:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545228139.6750817
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.000885009765625 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] HetznerKonsoleHProviderTests.test_Provider_when_calling_update_record_with_full_name_should_modify_record
[ 1587s]
[ 1587s] self = <test_hetzner.HetznerKonsoleHProviderTests testMethod=test_Provider_when_calling_update_record_with_full_name_should_modify_record>
[ 1587s]
[ 1587s] @_vcr_integration_test
[ 1587s] def test_Provider_when_calling_update_record_with_full_name_should_modify_record(self):
[ 1587s] > provider = self._construct_authenticated_provider()
[ 1587s]
[ 1587s] tests/providers/integration_tests.py:248:
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s] tests/providers/integration_tests.py:464: in _construct_authenticated_provider
[ 1587s] provider.authenticate()
[ 1587s] lexicon/providers/hetzner.py:168: in authenticate
[ 1587s] with self._session(self.domain, get_zone=False):
[ 1587s] /usr/lib64/python3.6/contextlib.py:81: in __enter__
[ 1587s] return next(self.gen)
[ 1587s] lexicon/providers/hetzner.py:589: in _session
[ 1587s] qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
[ 1587s] lexicon/providers/hetzner.py:462: in _get_dns_cname
[ 1587s] domain = dns.resolver.zone_for_name(name).to_text(True)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1156: in zone_for_name
[ 1587s] answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:1041: in query
[ 1587s] timeout = self._compute_timeout(start)
[ 1587s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 1587s]
[ 1587s] self = <dns.resolver.Resolver object at 0x7fda8101cc50>
[ 1587s] start = 1545228170.0485935
[ 1587s]
[ 1587s] def _compute_timeout(self, start):
[ 1587s] now = time.time()
[ 1587s] duration = now - start
[ 1587s] if duration < 0:
[ 1587s] if duration < -1:
[ 1587s] # Time going backwards is bad. Just give up.
[ 1587s] raise Timeout(timeout=duration)
[ 1587s] else:
[ 1587s] # Time went backwards, but only a little. This can
[ 1587s] # happen, e.g. under vmware with older linux kernels.
[ 1587s] # Pretend it didn't happen.
[ 1587s] now = start
[ 1587s] if duration >= self.lifetime:
[ 1587s] > raise Timeout(timeout=duration)
[ 1587s] E dns.exception.Timeout: The DNS operation timed out after 30.00108814239502 seconds
[ 1587s]
[ 1587s] /usr/lib/python3.6/site-packages/dns/resolver.py:858: Timeout
[ 1587s] ====== 50 failed, 4 passed, 4 skipped, 1460 deselected in 1580.97 seconds ======
```
[Full build log](https://github.com/AnalogJ/lexicon/files/2695322/lexicon-build-log.txt) is here.
| lexicon/providers/hetzner.py
<|code_start|>from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
import hashlib
import logging
import re
import time
import requests
from six import string_types
from urllib3.util.retry import Retry
# Due to optional requirement
try:
from bs4 import BeautifulSoup
import dns.resolver
import dns.zone
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = []
def ProviderParser(subparser):
    """Register the Hetzner provider's command-line arguments on *subparser*.

    Adds authentication options (account variant, username, password) and the
    provider-specific behavior switches ``--linked``, ``--propagated`` and
    ``--latency``. Defaults are plain literals ('yes', 'yes', 30); the
    original wrapped them in redundant ``str()``/``int()`` calls.
    """
    subparser.add_argument('--auth-account',
                           help='specify type of Hetzner account: by default Hetzner Robot '
                           '(robot) or Hetzner konsoleH (konsoleh)')
    subparser.add_argument('--auth-username', help='specify username of Hetzner account')
    subparser.add_argument('--auth-password', help='specify password of Hetzner account')
    subparser.add_argument('--linked',
                           help='if exists, uses linked CNAME as A|AAAA|TXT record name for edit '
                           'actions: by default (yes); Further restriction: Only enabled if '
                           'record name or raw FQDN record identifier \'type/name/content\' is '
                           'specified, and additionally for update actions the record name '
                           'remains the same',
                           default='yes',
                           choices=['yes', 'no'])
    subparser.add_argument('--propagated',
                           help='waits until record is publicly propagated after succeeded '
                           'create|update actions: by default (yes)',
                           default='yes',
                           choices=['yes', 'no'])
    subparser.add_argument('--latency',
                           help='specify latency, used during checks for publicly propagation '
                           'and additionally for Hetzner Robot after record edits: by default '
                           '30s (30)',
                           default=30,
                           type=int)
class Provider(BaseProvider):
"""
Implements the Hetzner DNS Provider.
There are two variants to manage DNS records on Hetzner: Hetzner Robot or
Hetzner konsoleH. Both do not provide a common API, therefore this provider
implements missing read and write methods in a generic way. For editing DNS
records on Hetzner, this provider manipulates and replaces the whole DNS zone.
Furthermore, there is no unique identifier to each record in the way that Lexicon
expects, why this provider implements a pseudo-identifer based on the record type,
name and content for use of the --identifier parameter. Supported identifier
formats are:
- hash generated|verified by 'list' command; e.g. '30fa112'
- raw concatenation of the record type, name (FQDN) and content (if possible
FQDN) with delimiter '/'; e.g. 'SRV/example.com./0 0 443 msx.example.com.'
or 'TXT/example.com./challengetoken'
Additional, this provider implements the option of replacing an A, AAAA or TXT record
name with an existent linked CNAME for edit actions via the --linked parameter and
the option of waiting until record is publicly propagated after succeeded create or
update actions via the --propagated parameter. As further restriction, the use of a
linked CNAME is only enabled if the record type & record name or the raw identifier are
specified, and additionally for the update action the record name remains the same.
"""
def __init__(self, config):
    """Initialize the provider from the Lexicon *config*.

    Builds ``self.api``, a static screen-scraping map for both account
    variants ('robot' and 'konsoleh'): endpoints, HTML filters used by
    BeautifulSoup, form field names and URL templates (``<index>``/``<id>``
    placeholders are substituted later). Validates ``auth_account`` (falls
    back to 'robot') and requires username and password.

    Raises:
        AssertionError: if ``auth_account`` is invalid, or username or
            password is missing.
    """
    super(Provider, self).__init__(config)
    # Static scraping configuration per account variant. Keys per variant:
    #   endpoint  - base URL of the web console
    #   filter    - BeautifulSoup filters locating the main content element
    #   auth/exit - login/logout request descriptions and form field names
    #   domain_id - how to page through and extract domain ids from listings
    #   zone      - how to fetch and post the raw zone file
    self.api = {
        'robot': {
            'endpoint': 'https://robot.your-server.de',
            'filter': [{'name': 'div', 'attrs': {'id': 'center_col'}}],
            'auth': {
                'endpoint': 'https://accounts.hetzner.com',
                'GET': {'url': '/login'},
                'POST': {'url': '/login_check'},
                'filter': [{'name': 'form', 'attrs': {'id': 'login-form'}}],
                'user': '_username',
                'pass': '_password'
            },
            'exit': {
                'GET': {'url': '/login/logout/r/true'}
            },
            'domain_id': {
                'GET': {'url': '/dns/index/page/<index>'},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'center_col'}},
                    {'name': 'table', 'attrs': {'class': 'box_title'}}
                ],
                'domain': [{'name': 'td', 'attrs': {'class': 'title'}}],
                'id': {'attr': 'onclick', 'regex': r'\'(\d+)\''}
            },
            'zone': {
                'GET': [{'url': '/dns/update/id/<id>'}],
                'POST': {'url': '/dns/update'},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'center_col'}},
                    {'name': 'ul', 'attrs': {'class': 'error_list'}}
                ],
                'file': 'zonefile'
            }
        },
        'konsoleh': {
            'endpoint': 'https://konsoleh.your-server.de',
            'filter': [{'name': 'div', 'attrs': {'id': 'content'}}],
            'auth': {
                'GET': {},
                'POST': {'url': '/login.php'},
                'filter': [{'name': 'form', 'attrs': {'id': 'loginform'}}],
                'user': 'login_user_inputbox',
                'pass': 'login_pass_inputbox'
            },
            'exit': {
                'GET': {'url': '/logout.php'}
            },
            'domain_id': {
                'GET': {'params': {'page': '<index>'}},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'domainlist'}},
                    {'name': 'dl'},
                    {'name': 'a'}
                ],
                'domain': [{'name': 'strong'}],
                'id': {'attr': 'href', 'regex': r'=(D\d+)'}
            },
            'zone': {
                'GET': [
                    {'params': {'domain_number': '<id>'}},
                    {'url': '/dns.php', 'params': {'dnsaction2': 'editintextarea'}}
                ],
                'POST': {'url': '/dns.php'},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'content'}},
                    {'name': 'div', 'attrs': {'class': 'error'}}
                ],
                'file': 'zone_file1'
            }
        }
    }
    # Lazily created requests session; populated by the session helper.
    self.session = None
    self.account = self._get_provider_option('auth_account')
    if self.account in (None, 'robot', 'konsoleh'):
        # Default to the Hetzner Robot variant when unspecified.
        self.account = self.account if self.account else 'robot'
    else:
        LOGGER.error('Hetzner => Argument for --auth-account is invalid: \'%s\' '
                     '(choose from \'robot\' or \'konsoleh\')', self.account)
        raise AssertionError
    self.username = self._get_provider_option('auth_username')
    # NOTE(review): asserts are stripped under `python -O`; consider an
    # explicit raise for credential validation — confirm before changing,
    # as callers may rely on AssertionError.
    assert self.username is not None
    self.password = self._get_provider_option('auth_password')
    assert self.password is not None
def authenticate(self):
    """Log in to the configured Hetzner account.

    Opening a session (without fetching the zone) is sufficient to prove
    both that the credentials are valid and that the domain — or its
    CNAME target — is managed by this account. Returns True on success.
    """
    session_guard = self._session(self.domain, get_zone=False)
    with session_guard:
        return True
def create_record(self, type, name, content):
    """Add a record with the given type, name and content to the zone.

    Returns True when an identical record already exists or when the
    updated zone was accepted by Hetzner; False for missing parameters
    or a rejected zone post.
    """
    with self._session(self.domain, self.domain_id) as zone_info:
        # All three pieces are mandatory for a create.
        if not (type and name and content):
            LOGGER.warning('Hetzner => Record has no type|name|content specified')
            return False
        # Prefer a linked CNAME target over the requested name, if present.
        record_name = zone_info['cname'] if zone_info['cname'] else self._fqdn_name(name)
        record_set = zone_info['zone']['data'].get_rdataset(record_name, rdtype=type,
                                                            create=True)
        well_formed = self._convert_content(type, content)
        # Nothing to do when the exact content is already in the rdataset.
        if any(entry.to_text() == well_formed for entry in record_set):
            LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                        content)
            return True
        # Keep the zone's TTL only when positive and below the configured one.
        configured_ttl = self._get_lexicon_option('ttl')
        ttl = record_set.ttl if 0 < record_set.ttl < configured_ttl else configured_ttl
        record_set.update(dns.rdataset.from_text(record_set.rdclass, record_set.rdtype,
                                                 ttl, well_formed))
        # Hetzner takes whole-zone replacements only.
        synced_change = self._post_zone(zone_info['zone'])
        if synced_change:
            self._propagated_record(type, record_name, well_formed,
                                    zone_info['nameservers'])
        return synced_change
def list_records(self, type=None, name=None, content=None):
    """Return zone records filtered by type, name and content.

    All filters are optional; an empty list is returned when nothing
    matches.
    """
    with self._session(self.domain, self.domain_id) as zone_info:
        fqdn = None
        if name:
            fqdn = self._fqdn_name(name)
        return self._list_records(zone_info['zone']['data'], type, fqdn, content)
def update_record(self, identifier=None, type=None, name=None, content=None):
    """
    Connects to Hetzner account, changes an existing record and returns a boolean,
    if update was successful or not. Needed identifier or type & name to lookup
    over all records of the zone for exactly one record to update.

    The update is performed as remove-then-add inside a single zone
    snapshot, which is finally posted back to Hetzner as a whole.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters: either a resolvable identifier, or an
        # explicit type/name/content triple.
        if identifier:
            dtype, dname, dcontent = self._parse_identifier(identifier, ddata['zone']['data'])
            if dtype and dname and dcontent:
                # Explicit arguments win over the identifier-derived values.
                type = type if type else dtype
                name = name if name else dname
                content = content if content else dcontent
            else:
                LOGGER.warning('Hetzner => Record with identifier \'%s\' does not exist',
                               identifier)
                return False
        elif type and name and content:
            # Lookup by type+name only; content stays a wildcard here.
            dtype, dname, dcontent = type, name, None
        else:
            LOGGER.warning('Hetzner => Record has no type|name|content specified')
            return False
        # Use the linked CNAME target as the lookup name when one exists.
        dname = ddata['cname'] if ddata['cname'] else self._fqdn_name(dname)
        records = self._list_records(ddata['zone']['data'], dtype, dname, dcontent)
        # Refuse ambiguous updates: exactly one matching record is required.
        if len(records) == 1:
            # Remove record from zone: rebuild the rdataset without the old
            # content, or drop the rdataset entirely if it becomes empty.
            rrset = ddata['zone']['data'].get_rdataset(records[0]['name']+'.',
                                                       rdtype=records[0]['type'])
            rdatas = []
            for rdata in rrset:
                if self._convert_content(records[0]['type'],
                                         records[0]['content']) != rdata.to_text():
                    rdatas.append(rdata.to_text())
            if rdatas:
                rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                       records[0]['ttl'], rdatas)
                ddata['zone']['data'].replace_rdataset(records[0]['name']+'.', rdataset)
            else:
                ddata['zone']['data'].delete_rdataset(records[0]['name']+'.',
                                                      records[0]['type'])
            # Add record to zone (skipped when identical content already exists).
            name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
            rrset = ddata['zone']['data'].get_rdataset(name, rdtype=type, create=True)
            synced_change = False
            for rdata in rrset:
                if self._convert_content(type, content) == rdata.to_text():
                    LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                                content)
                    synced_change = True
                    break
            if not synced_change:
                # Keep the existing TTL only when positive and below the
                # configured one; otherwise fall back to the configured TTL.
                ttl = (rrset.ttl if rrset.ttl > 0
                       and rrset.ttl < self._get_lexicon_option('ttl')
                       else self._get_lexicon_option('ttl'))
                rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype, ttl,
                                                  self._convert_content(type, content))
                rrset.update(rdataset)
            # Post zone to Hetzner; optionally wait for public propagation.
            synced_change = self._post_zone(ddata['zone'])
            if synced_change:
                self._propagated_record(type, name, self._convert_content(type, content),
                                        ddata['nameservers'])
            return synced_change
        LOGGER.warning('Hetzner => Record lookup has not only one match')
        return False
def delete_record(self, identifier=None, type=None, name=None, content=None):
    """
    Connects to Hetzner account, removes an existing record from the zone and returns a
    boolean, if deletion was successful or not. Uses identifier or type, name & content to
    lookup over all records of the zone for one or more records to delete.

    Deleting a non-existent record is treated as success (returns True).
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters: resolve an identifier into the
        # type/name/content triple when one is given.
        if identifier:
            type, name, content = self._parse_identifier(identifier, ddata['zone']['data'])
            if type is None or name is None or content is None:
                # Already gone — deletion is idempotent.
                LOGGER.info('Hetzner => Record with identifier \'%s\' does not exist',
                            identifier)
                return True
        # Use the linked CNAME target as the lookup name when one exists.
        name = ddata['cname'] if ddata['cname'] else (self._fqdn_name(name) if name else None)
        records = self._list_records(ddata['zone']['data'], type, name, content)
        if records:
            # Remove records from zone: rebuild each affected rdataset
            # without the matched content, or drop it entirely when empty.
            for record in records:
                rrset = ddata['zone']['data'].get_rdataset(record['name']+'.',
                                                           rdtype=record['type'])
                rdatas = []
                for rdata in rrset:
                    if self._convert_content(record['type'],
                                             record['content']) != rdata.to_text():
                        rdatas.append(rdata.to_text())
                if rdatas:
                    rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                           record['ttl'], rdatas)
                    ddata['zone']['data'].replace_rdataset(record['name']+'.', rdataset)
                else:
                    ddata['zone']['data'].delete_rdataset(record['name']+'.', record['type'])
            # Post zone to Hetzner as a whole-zone replacement.
            synced_change = self._post_zone(ddata['zone'])
            return synced_change
        LOGGER.info('Hetzner => Record lookup has no matches')
        return True
###############################################################################
# Provider base helpers
###############################################################################
@staticmethod
def _create_identifier(rdtype, name, content):
    """Build the short pseudo-identifier for a record.

    Returns the first seven hex digits of the SHA-256 digest over the
    concatenation 'type/name/content' (type and name fully qualified).
    """
    digest = hashlib.sha256()
    for chunk in (rdtype + '/', name + '/', content):
        digest.update(chunk.encode('UTF-8'))
    return digest.hexdigest()[:7]
def _parse_identifier(self, identifier, zone=None):
"""
Parses the record identifier and returns type, name & content of the associated record
as tuple. The tuple is empty if no associated record found.
"""
rdtype, name, content = None, None, None
if len(identifier) > 7:
parts = identifier.split('/')
rdtype, name, content = parts[0], parts[1], '/'.join(parts[2:])
else:
records = self._list_records(zone)
for record in records:
if record['id'] == identifier:
rdtype, name, content = record['type'], record['name']+'.', record['content']
return rdtype, name, content
def _convert_content(self, rdtype, content):
"""
Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content.
"""
if rdtype == 'TXT':
if content[0] != '"':
content = '"' + content
if content[-1] != '"':
content += '"'
if rdtype in ('CNAME', 'MX', 'NS', 'SRV'):
if content[-1] != '.':
content = self._fqdn_name(content)
return content
def _list_records(self, zone, rdtype=None, name=None, content=None):
    """
    Iterates over all records of the zone and returns a list of records filtered
    by record type, name and content. The list is empty if no records found.
    """
    records = []
    # zone may be None (e.g. while parsing raw identifiers); then nothing matches.
    rrsets = zone.iterate_rdatasets() if zone else []
    for rname, rdataset in rrsets:
        rtype = dns.rdatatype.to_text(rdataset.rdtype)
        if ((not rdtype or rdtype == rtype)
                and (not name or name == rname.to_text())):
            for rdata in rdataset:
                rdata = rdata.to_text()
                # Compare against the fully qualified content form; the result
                # stores the raw (TXT-unquoted) content.
                if (not content or self._convert_content(rtype, content) == rdata):
                    raw_rdata = self._clean_TXT_record({'type': rtype,
                                                        'content': rdata})['content']
                    data = {
                        'type': rtype,
                        'name': rname.to_text(True),
                        'ttl': int(rdataset.ttl),
                        'content': raw_rdata,
                        # Hetzner has no native record IDs, so a hash of
                        # type/name/content serves as pseudo-identifier.
                        'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
                    }
                    records.append(data)
    return records
def _request(self, action='GET', url='/', data=None, query_params=None):
    """
    Requests to Hetzner by current session and returns the response.

    Raises an HTTP error for 4xx/5xx responses via raise_for_status().
    """
    # Avoid mutable default arguments.
    if data is None:
        data = {}
    if query_params is None:
        query_params = {}
    response = self.session.request(action, self.api[self.account]['endpoint'] + url,
                                    params=query_params, data=data)
    response.raise_for_status()
    return response
###############################################################################
# Provider option helpers
###############################################################################
@staticmethod
def _dns_lookup(name, rdtype, nameservers=None):
    """
    Looks on specified or default system domain nameservers to resolve record type
    & name and returns record set. The record set is empty if no propagated
    record found.
    """
    # Start with an empty rrset so a failed lookup yields an empty result.
    rrset = dns.rrset.from_text(name, 0, 1, rdtype)
    try:
        resolver = dns.resolver.Resolver()
        if nameservers:
            resolver.nameservers = nameservers
        rrset = resolver.query(name, rdtype)
        for rdata in rrset:
            LOGGER.debug('DNS Lookup => %s %s %s %s',
                         rrset.name.to_text(), dns.rdataclass.to_text(rrset.rdclass),
                         dns.rdatatype.to_text(rrset.rdtype), rdata.to_text())
    except dns.exception.DNSException as error:
        # Lookup errors are expected while a record is not yet propagated;
        # log at debug level only.
        LOGGER.debug('DNS Lookup => %s', error)
    return rrset
@staticmethod
def _get_nameservers(domain):
    """
    Looks for domain nameservers and returns the IPs of the nameservers as a list.
    The list is empty, if no nameservers were found. Needed associated domain zone
    name for lookup.
    """
    nameservers = []
    rdtypes_ns = ['SOA', 'NS']
    rdtypes_ip = ['A', 'AAAA']
    for rdtype_ns in rdtypes_ns:
        for rdata_ns in Provider._dns_lookup(domain, rdtype_ns):
            for rdtype_ip in rdtypes_ip:
                # The first whitespace-separated field of SOA/NS rdata is the
                # nameserver host name; resolve it to its IPs.
                for rdata_ip in Provider._dns_lookup(rdata_ns.to_text().split(' ')[0],
                                                     rdtype_ip):
                    if rdata_ip.to_text() not in nameservers:
                        nameservers.append(rdata_ip.to_text())
    LOGGER.debug('DNS Lookup => %s IN NS %s', domain, ' '.join(nameservers))
    return nameservers
@staticmethod
def _get_dns_cname(name, link=False):
    """
    Looks for associated domain zone, nameservers and linked record name until no
    more linked record name was found for the given fully qualified record name or
    the CNAME lookup was disabled, and then returns the parameters as a tuple.
    """
    domain = dns.resolver.zone_for_name(name).to_text(True)
    nameservers = Provider._get_nameservers(domain)
    cname = None
    # Follow at most max_links chained CNAME records to avoid infinite loops.
    links, max_links = 0, 5
    while link:
        if links >= max_links:
            LOGGER.error('Hetzner => Record %s has more than %d linked CNAME '
                         'records. Reduce the amount of CNAME links!',
                         name, max_links)
            raise AssertionError
        qname = cname if cname else name
        rrset = Provider._dns_lookup(qname, 'CNAME', nameservers)
        if rrset:
            links += 1
            cname = rrset[0].to_text()
            # The CNAME target may live in a different zone; re-resolve the
            # zone and its nameservers in that case.
            qdomain = dns.resolver.zone_for_name(cname)
            if domain != qdomain.to_text(True):
                domain = qdomain.to_text(True)
                nameservers = Provider._get_nameservers(qdomain)
        else:
            link = False
    if cname:
        LOGGER.info('Hetzner => Record %s has CNAME %s', name, cname)
    return domain, nameservers, cname
def _link_record(self):
    """
    Checks restrictions for use of CNAME lookup and returns a tuple of the
    fully qualified record name to lookup and a boolean, if a CNAME lookup
    should be done or not. The fully qualified record name is empty if no
    record name is specified by this provider.
    """
    action = self._get_lexicon_option('action')
    identifier = self._get_lexicon_option('identifier')
    rdtype = self._get_lexicon_option('type')
    name = (self._fqdn_name(self._get_lexicon_option('name'))
            if self._get_lexicon_option('name') else None)
    link = True if self._get_provider_option('linked') == 'yes' else False
    # qname keeps the name from the --name option before a given identifier
    # may override type & name below.
    qname = name
    if identifier:
        rdtype, name, _ = self._parse_identifier(identifier)
    # CNAME lookup is only enabled for edit actions on A|AAAA|TXT records with
    # a known name, and for updates only if the record name stays the same.
    if action != 'list' and rdtype in ('A', 'AAAA', 'TXT') and name and link:
        if action != 'update' or name == qname or not qname:
            LOGGER.info('Hetzner => Enable CNAME lookup '
                        '(see --linked parameter)')
            return qname, True
    LOGGER.info('Hetzner => Disable CNAME lookup '
                '(see --linked parameter)')
    return qname, False
def _propagated_record(self, rdtype, name, content, nameservers=None):
    """
    If the publicly propagation check should be done, waits until the domain nameservers
    responses with the propagated record type, name & content and returns a boolean,
    if the publicly propagation was successful or not.
    """
    latency = self._get_provider_option('latency')
    propagated = True if self._get_provider_option('propagated') == 'yes' else False
    if propagated:
        # Poll the nameservers up to max_retry times, sleeping 'latency'
        # seconds between attempts.
        retry, max_retry = 0, 20
        while retry < max_retry:
            for rdata in Provider._dns_lookup(name, rdtype, nameservers):
                if content == rdata.to_text():
                    LOGGER.info('Hetzner => Record %s has %s %s', name, rdtype, content)
                    return True
            retry += 1
            retry_log = (', retry ({}/{}) in {}s...'.format((retry + 1), max_retry, latency)
                         if retry < max_retry else '')
            LOGGER.info('Hetzner => Record is not propagated%s', retry_log)
            time.sleep(latency)
    return False
###############################################################################
# Hetzner API helpers
###############################################################################
@staticmethod
def _filter_dom(dom, filters, last_find_all=False):
    """
    If not exists, creates an DOM from a given session response, then filters the DOM
    via given API filters and returns the filtered DOM. The DOM is empty if the filters
    have no match.
    """
    if isinstance(dom, string_types):
        # Accept raw HTML text and parse it on demand.
        dom = BeautifulSoup(dom, 'html.parser')
    for idx, find in enumerate(filters, start=1):
        if not dom:
            # A previous filter produced no match; stop early.
            break
        name, attrs = find.get('name'), find.get('attrs', {})
        if len(filters) == idx and last_find_all:
            # Only the last filter may expand to a list of all matches.
            dom = dom.find_all(name, attrs=attrs) if name else dom.find_all(attrs=attrs)
        else:
            dom = dom.find(name, attrs=attrs) if name else dom.find(attrs=attrs)
    return dom
@staticmethod
def _extract_hidden_data(dom):
"""
Extracts hidden input data from DOM and returns the data as dictionary.
"""
input_tags = dom.find_all('input', attrs={'type': 'hidden'})
data = {}
for input_tag in input_tags:
data[input_tag['name']] = input_tag['value']
return data
@staticmethod
def _extract_domain_id(string, regex):
"""
Extracts domain ID from given string and returns the domain ID.
"""
regex = re.compile(regex)
match = regex.search(string)
if not match:
return False
return str(match.group(1))
@contextmanager
def _session(self, domain, domain_id=None, get_zone=True):
    """
    Generates, authenticates and exits session to Hetzner account, and
    provides a dict of additional needed domain data (domain nameservers,
    zone and linked record name) to public methods. The dict values are
    empty if not existent or specified. The session is always exited, and
    any error raised during the session propagates to the caller.
    """
    name, link = self._link_record()
    qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
    qdomain_id, zone = domain_id, None
    self.session = self._auth_session(self.username, self.password)
    # Fix: the former 'except Exception as exc: raise exc' clause was a no-op
    # re-raise that only added a traceback frame; 'finally' alone guarantees
    # the session is exited while exceptions propagate unchanged.
    try:
        # The effective domain may differ from self.domain when a linked CNAME
        # points into another zone; resolve its ID on demand.
        if not domain_id or qdomain != domain:
            qdomain_id = self._get_domain_id(qdomain)
        if qdomain == domain:
            self.domain_id = qdomain_id
        if get_zone:
            zone = self._get_zone(qdomain, qdomain_id)
        yield {'nameservers': nameservers, 'zone': zone, 'cname': cname}
    finally:
        self._exit_session()
def _auth_session(self, username, password):
    """
    Creates session to Hetzner account, authenticates with given credentials and
    returns the session, if authentication was successful. Otherwise raises error.
    """
    api = self.api[self.account]['auth']
    # The auth endpoint may differ from the main endpoint (Robot uses
    # accounts.hetzner.com).
    endpoint = api.get('endpoint', self.api[self.account]['endpoint'])
    session = requests.Session()
    # Retry transient HTTPS failures with exponential backoff.
    session_retries = Retry(total=10, backoff_factor=0.5)
    session_adapter = requests.adapters.HTTPAdapter(max_retries=session_retries)
    session.mount('https://', session_adapter)
    response = session.request('GET', endpoint + api['GET'].get('url', '/'))
    dom = Provider._filter_dom(response.text, api['filter'])
    # Carry the login form's hidden fields (e.g. tokens) into the POST.
    data = Provider._extract_hidden_data(dom)
    data[api['user']], data[api['pass']] = username, password
    response = session.request('POST', endpoint + api['POST']['url'], data=data)
    # If the login form filter still matches, authentication failed.
    if Provider._filter_dom(response.text, api['filter']):
        LOGGER.error('Hetzner => Unable to authenticate session with %s account \'%s\': '
                     'Invalid credentials',
                     self.account, username)
        raise AssertionError
    LOGGER.info('Hetzner => Authenticate session with %s account \'%s\'',
                self.account, username)
    return session
def _exit_session(self):
    """
    Exits session to Hetzner account and returns.

    Always clears self.session, even if the logout page reports a problem.
    """
    api = self.api[self.account]
    response = self._get(api['exit']['GET']['url'])
    # After a successful logout the account's content filter must not match.
    if not Provider._filter_dom(response.text, api['filter']):
        LOGGER.info('Hetzner => Exit session')
    else:
        LOGGER.warning('Hetzner => Unable to exit session')
    self.session = None
    return True
def _get_domain_id(self, domain):
    """
    Pulls all domains managed by authenticated Hetzner account, extracts their IDs
    and returns the ID for the current domain, if exists. Otherwise raises error.
    """
    api = self.api[self.account]['domain_id']
    qdomain = dns.name.from_text(domain).to_unicode(True)
    # Page through the web frontend's domain list until a page adds no new
    # entries (no explicit page count is available).
    domains, last_count, page = {}, -1, 0
    while last_count != len(domains):
        last_count = len(domains)
        page += 1
        # Substitute the page index into the URL and query parameter templates.
        url = (api['GET'].copy()).get('url', '/').replace('<index>', str(page))
        params = api['GET'].get('params', {}).copy()
        for param in params:
            params[param] = params[param].replace('<index>', str(page))
        response = self._get(url, query_params=params)
        domain_tags = Provider._filter_dom(response.text, api['filter'], True)
        for domain_tag in domain_tags:
            # The ID is embedded in a tag attribute (e.g. onclick/href).
            domain_id = Provider._extract_domain_id(dict(domain_tag.attrs)[api['id']['attr']],
                                                    api['id']['regex'])
            domain = (Provider._filter_dom(domain_tag, api['domain'])
                      .renderContents().decode('UTF-8'))
            domains[domain] = domain_id
            if domain == qdomain:
                LOGGER.info('Hetzner => Get ID %s for domain %s', domain_id, qdomain)
                return domain_id
    LOGGER.error('Hetzner => ID for domain %s does not exists', qdomain)
    raise AssertionError
def _get_zone(self, domain, domain_id):
    """
    Pulls the zone for the current domain from authenticated Hetzner account and
    returns it as an zone object.
    """
    api = self.api[self.account]
    # Some accounts (konsoleH) require a chain of GETs to reach the zone file
    # editor; only the last response is parsed.
    for request in api['zone']['GET']:
        url = (request.copy()).get('url', '/').replace('<id>', domain_id)
        params = request.get('params', {}).copy()
        for param in params:
            params[param] = params[param].replace('<id>', domain_id)
        response = self._get(url, query_params=params)
    dom = Provider._filter_dom(response.text, api['filter'])
    # The zone file is the content of a named <textarea>.
    zone_file_filter = [{'name': 'textarea', 'attrs': {'name': api['zone']['file']}}]
    zone_file = Provider._filter_dom(dom, zone_file_filter).renderContents().decode('UTF-8')
    # Keep the form's hidden fields; they must be posted back with the zone.
    hidden = Provider._extract_hidden_data(dom)
    zone = {'data': dns.zone.from_text(zone_file, origin=domain, relativize=False),
            'hidden': hidden}
    LOGGER.info('Hetzner => Get zone for domain %s', domain)
    return zone
def _post_zone(self, zone):
    """
    Pushes updated zone for current domain to authenticated Hetzner account and
    returns a boolean, if update was successful or not. Furthermore, waits until
    the zone has been taken over, if it is a Hetzner Robot account.
    """
    api = self.api[self.account]['zone']
    data = zone['hidden']
    data[api['file']] = zone['data'].to_text(relativize=True)
    response = self._post(api['POST']['url'], data=data)
    # If the account's error filter matches, Hetzner rejected the zone file.
    if Provider._filter_dom(response.text, api['filter']):
        LOGGER.error('Hetzner => Unable to update zone for domain %s: Syntax error\n\n%s',
                     zone['data'].origin.to_unicode(True),
                     zone['data'].to_text(relativize=True).decode('UTF-8'))
        return False
    LOGGER.info('Hetzner => Update zone for domain %s',
                zone['data'].origin.to_unicode(True))
    if self.account == 'robot':
        # Robot takes over zone changes asynchronously; wait before returning
        # so follow-up edits see the updated zone.
        latency = self._get_provider_option('latency')
        LOGGER.info('Hetzner => Wait %ds until Hetzner Robot has taken over zone...',
                    latency)
        time.sleep(latency)
    return True
<|code_end|>
| lexicon/providers/hetzner.py
<|code_start|>from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
import hashlib
import logging
import re
import time
import requests
from six import string_types
from urllib3.util.retry import Retry
# Due to optional requirement
try:
from bs4 import BeautifulSoup
import dns.resolver
import dns.zone
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
# Module-level logger for this provider.
LOGGER = logging.getLogger(__name__)

# No static nameserver domains are declared here; the provider looks the
# nameservers up dynamically per zone (see Provider._get_nameservers).
NAMESERVER_DOMAINS = []
def ProviderParser(subparser):
    """Registers the Hetzner provider's CLI options on the given argparse subparser."""
    subparser.add_argument('--auth-account',
                           help='specify type of Hetzner account: by default Hetzner Robot '
                           '(robot) or Hetzner konsoleH (konsoleh)')
    subparser.add_argument('--auth-username', help='specify username of Hetzner account')
    subparser.add_argument('--auth-password', help='specify password of Hetzner account')
    subparser.add_argument('--linked',
                           help='if exists, uses linked CNAME as A|AAAA|TXT record name for edit '
                           'actions: by default (yes); Further restriction: Only enabled if '
                           'record name or raw FQDN record identifier \'type/name/content\' is '
                           'specified, and additionally for update actions the record name '
                           'remains the same',
                           default=str('yes'),
                           choices=['yes', 'no'])
    subparser.add_argument('--propagated',
                           help='waits until record is publicly propagated after succeeded '
                           'create|update actions: by default (yes)',
                           default=str('yes'),
                           choices=['yes', 'no'])
    subparser.add_argument('--latency',
                           help='specify latency, used during checks for publicly propagation '
                           'and additionally for Hetzner Robot after record edits: by default '
                           '30s (30)',
                           default=int(30),
                           type=int)
class Provider(BaseProvider):
"""
Implements the Hetzner DNS Provider.
There are two variants to manage DNS records on Hetzner: Hetzner Robot or
Hetzner konsoleH. Both do not provide a common API, therefore this provider
implements missing read and write methods in a generic way. For editing DNS
records on Hetzner, this provider manipulates and replaces the whole DNS zone.
Furthermore, there is no unique identifier to each record in the way that Lexicon
expects, why this provider implements a pseudo-identifer based on the record type,
name and content for use of the --identifier parameter. Supported identifier
formats are:
- hash generated|verified by 'list' command; e.g. '30fa112'
- raw concatenation of the record type, name (FQDN) and content (if possible
FQDN) with delimiter '/'; e.g. 'SRV/example.com./0 0 443 msx.example.com.'
or 'TXT/example.com./challengetoken'
Additional, this provider implements the option of replacing an A, AAAA or TXT record
name with an existent linked CNAME for edit actions via the --linked parameter and
the option of waiting until record is publicly propagated after succeeded create or
update actions via the --propagated parameter. As further restriction, the use of a
linked CNAME is only enabled if the record type & record name or the raw identifier are
specified, and additionally for the update action the record name remains the same.
"""
def __init__(self, config):
    """Initialize the provider: API descriptions, account type and credentials."""
    super(Provider, self).__init__(config)
    # Declarative description of both Hetzner web frontends: endpoints, DOM
    # filters, form field names and URL templates ('<index>'/'<id>' are
    # substituted by the scraping helpers).
    self.api = {
        'robot': {
            'endpoint': 'https://robot.your-server.de',
            'filter': [{'name': 'div', 'attrs': {'id': 'center_col'}}],
            'auth': {
                # Robot authenticates against a separate accounts endpoint.
                'endpoint': 'https://accounts.hetzner.com',
                'GET': {'url': '/login'},
                'POST': {'url': '/login_check'},
                'filter': [{'name': 'form', 'attrs': {'id': 'login-form'}}],
                'user': '_username',
                'pass': '_password'
            },
            'exit': {
                'GET': {'url': '/login/logout/r/true'}
            },
            'domain_id': {
                'GET': {'url': '/dns/index/page/<index>'},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'center_col'}},
                    {'name': 'table', 'attrs': {'class': 'box_title'}}
                ],
                'domain': [{'name': 'td', 'attrs': {'class': 'title'}}],
                'id': {'attr': 'onclick', 'regex': r'\'(\d+)\''}
            },
            'zone': {
                'GET': [{'url': '/dns/update/id/<id>'}],
                'POST': {'url': '/dns/update'},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'center_col'}},
                    {'name': 'ul', 'attrs': {'class': 'error_list'}}
                ],
                'file': 'zonefile'
            }
        },
        'konsoleh': {
            'endpoint': 'https://konsoleh.your-server.de',
            'filter': [{'name': 'div', 'attrs': {'id': 'content'}}],
            'auth': {
                'GET': {},
                'POST': {'url': '/login.php'},
                'filter': [{'name': 'form', 'attrs': {'id': 'loginform'}}],
                'user': 'login_user_inputbox',
                'pass': 'login_pass_inputbox'
            },
            'exit': {
                'GET': {'url': '/logout.php'}
            },
            'domain_id': {
                'GET': {'params': {'page': '<index>'}},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'domainlist'}},
                    {'name': 'dl'},
                    {'name': 'a'}
                ],
                'domain': [{'name': 'strong'}],
                'id': {'attr': 'href', 'regex': r'=(D\d+)'}
            },
            'zone': {
                # konsoleH needs two chained GETs to open the zone editor.
                'GET': [
                    {'params': {'domain_number': '<id>'}},
                    {'url': '/dns.php', 'params': {'dnsaction2': 'editintextarea'}}
                ],
                'POST': {'url': '/dns.php'},
                'filter': [
                    {'name': 'div', 'attrs': {'id': 'content'}},
                    {'name': 'div', 'attrs': {'class': 'error'}}
                ],
                'file': 'zone_file1'
            }
        }
    }
    self.session = None
    # Account type defaults to Hetzner Robot when not given.
    self.account = self._get_provider_option('auth_account')
    if self.account in (None, 'robot', 'konsoleh'):
        self.account = self.account if self.account else 'robot'
    else:
        LOGGER.error('Hetzner => Argument for --auth-account is invalid: \'%s\' '
                     '(choose from \'robot\' or \'konsoleh\')', self.account)
        raise AssertionError
    # Credentials are mandatory.
    self.username = self._get_provider_option('auth_username')
    assert self.username is not None
    self.password = self._get_provider_option('auth_password')
    assert self.password is not None
def authenticate(self):
    """
    Opens a session to the Hetzner account to verify that the credentials are
    valid and that the domain or CNAME target is managed by this account, and
    returns True on success.
    """
    # _session raises on invalid credentials or an unmanaged domain; no zone
    # download is needed for a pure authentication check.
    session_ctx = self._session(self.domain, get_zone=False)
    with session_ctx:
        return True
def create_record(self, type, name, content):
    """
    Connects to Hetzner account, adds a new record to the zone and returns a
    boolean, if creation was successful or not. Needed record type, name and
    content for record to create.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if not type or not name or not content:
            LOGGER.warning('Hetzner => Record has no type|name|content specified')
            return False
        # Add record to zone
        # Prefer a linked CNAME target over the given record name, if present.
        name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
        rrset = ddata['zone']['data'].get_rdataset(name, rdtype=type, create=True)
        for rdata in rrset:
            if self._convert_content(type, content) == rdata.to_text():
                # Creating an already existing record counts as success
                # (idempotent create).
                LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                            content)
                return True
        # Use the smaller of the existing rdataset TTL and the requested TTL.
        ttl = (rrset.ttl if rrset.ttl > 0
               and rrset.ttl < self._get_lexicon_option('ttl')
               else self._get_lexicon_option('ttl'))
        rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype,
                                          ttl, self._convert_content(type, content))
        rrset.update(rdataset)
        # Post zone to Hetzner
        synced_change = self._post_zone(ddata['zone'])
        if synced_change:
            # Optionally wait for public DNS propagation (see --propagated).
            self._propagated_record(type, name, self._convert_content(type, content),
                                    ddata['nameservers'])
        return synced_change
def list_records(self, type=None, name=None, content=None):
    """
    Connects to Hetzner account and returns a list of records filtered by record
    type, name and content. The list is empty if no records found.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Qualify the name before filtering the pulled zone locally.
        name = self._fqdn_name(name) if name else None
        return self._list_records(ddata['zone']['data'], type, name, content)
def update_record(self, identifier=None, type=None, name=None, content=None):
    """
    Connects to Hetzner account, changes an existing record and returns a boolean,
    if update was successful or not. Needed identifier or type & name to lookup
    over all records of the zone for exactly one record to update.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if identifier:
            dtype, dname, dcontent = self._parse_identifier(identifier, ddata['zone']['data'])
            if dtype and dname and dcontent:
                # Missing parameters fall back to the values derived from the
                # identifier.
                type = type if type else dtype
                name = name if name else dname
                content = content if content else dcontent
            else:
                LOGGER.warning('Hetzner => Record with identifier \'%s\' does not exist',
                               identifier)
                return False
        elif type and name and content:
            dtype, dname, dcontent = type, name, None
        else:
            LOGGER.warning('Hetzner => Record has no type|name|content specified')
            return False
        # Prefer a linked CNAME target over the looked-up record name.
        dname = ddata['cname'] if ddata['cname'] else self._fqdn_name(dname)
        records = self._list_records(ddata['zone']['data'], dtype, dname, dcontent)
        # Only update when the lookup matches exactly one record.
        if len(records) == 1:
            # Remove record from zone
            rrset = ddata['zone']['data'].get_rdataset(records[0]['name']+'.',
                                                       rdtype=records[0]['type'])
            rdatas = []
            for rdata in rrset:
                if self._convert_content(records[0]['type'],
                                         records[0]['content']) != rdata.to_text():
                    rdatas.append(rdata.to_text())
            if rdatas:
                rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                       records[0]['ttl'], rdatas)
                ddata['zone']['data'].replace_rdataset(records[0]['name']+'.', rdataset)
            else:
                ddata['zone']['data'].delete_rdataset(records[0]['name']+'.',
                                                      records[0]['type'])
            # Add record to zone
            name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
            rrset = ddata['zone']['data'].get_rdataset(name, rdtype=type, create=True)
            synced_change = False
            for rdata in rrset:
                if self._convert_content(type, content) == rdata.to_text():
                    # The new record data already exists; no zone push needed.
                    LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                                content)
                    synced_change = True
                    break
            if not synced_change:
                # Use the smaller of the existing rdataset TTL and the
                # requested TTL.
                ttl = (rrset.ttl if rrset.ttl > 0
                       and rrset.ttl < self._get_lexicon_option('ttl')
                       else self._get_lexicon_option('ttl'))
                rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype, ttl,
                                                  self._convert_content(type, content))
                rrset.update(rdataset)
                # Post zone to Hetzner
                synced_change = self._post_zone(ddata['zone'])
            if synced_change:
                # Optionally wait for public DNS propagation (see --propagated).
                self._propagated_record(type, name, self._convert_content(type, content),
                                        ddata['nameservers'])
            return synced_change
        LOGGER.warning('Hetzner => Record lookup has not only one match')
        return False
def delete_record(self, identifier=None, type=None, name=None, content=None):
    """
    Connects to Hetzner account, removes an existing record from the zone and returns a
    boolean, if deletion was successful or not. Uses identifier or type, name & content to
    lookup over all records of the zone for one or more records to delete.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if identifier:
            type, name, content = self._parse_identifier(identifier, ddata['zone']['data'])
            if type is None or name is None or content is None:
                # A record that does not exist counts as successfully deleted
                # (idempotent delete).
                LOGGER.info('Hetzner => Record with identifier \'%s\' does not exist',
                            identifier)
                return True
        # Prefer a linked CNAME target over the given record name, if present.
        name = ddata['cname'] if ddata['cname'] else (self._fqdn_name(name) if name else None)
        records = self._list_records(ddata['zone']['data'], type, name, content)
        if records:
            # Remove records from zone
            for record in records:
                rrset = ddata['zone']['data'].get_rdataset(record['name']+'.',
                                                           rdtype=record['type'])
                # Keep every rdata of the set except the one being deleted.
                rdatas = []
                for rdata in rrset:
                    if self._convert_content(record['type'],
                                             record['content']) != rdata.to_text():
                        rdatas.append(rdata.to_text())
                if rdatas:
                    rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                           record['ttl'], rdatas)
                    ddata['zone']['data'].replace_rdataset(record['name']+'.', rdataset)
                else:
                    # Last rdata removed: drop the whole rdataset.
                    ddata['zone']['data'].delete_rdataset(record['name']+'.', record['type'])
            # Post zone to Hetzner
            synced_change = self._post_zone(ddata['zone'])
            return synced_change
        LOGGER.info('Hetzner => Record lookup has no matches')
        return True
###############################################################################
# Provider base helpers
###############################################################################
@staticmethod
def _create_identifier(rdtype, name, content):
"""
Creates hashed identifier based on full qualified record type, name & content
and returns hash.
"""
sha256 = hashlib.sha256()
sha256.update((rdtype + '/').encode('UTF-8'))
sha256.update((name + '/').encode('UTF-8'))
sha256.update(content.encode('UTF-8'))
return sha256.hexdigest()[0:7]
def _parse_identifier(self, identifier, zone=None):
"""
Parses the record identifier and returns type, name & content of the associated record
as tuple. The tuple is empty if no associated record found.
"""
rdtype, name, content = None, None, None
if len(identifier) > 7:
parts = identifier.split('/')
rdtype, name, content = parts[0], parts[1], '/'.join(parts[2:])
else:
records = self._list_records(zone)
for record in records:
if record['id'] == identifier:
rdtype, name, content = record['type'], record['name']+'.', record['content']
return rdtype, name, content
def _convert_content(self, rdtype, content):
"""
Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content.
"""
if rdtype == 'TXT':
if content[0] != '"':
content = '"' + content
if content[-1] != '"':
content += '"'
if rdtype in ('CNAME', 'MX', 'NS', 'SRV'):
if content[-1] != '.':
content = self._fqdn_name(content)
return content
def _list_records(self, zone, rdtype=None, name=None, content=None):
    """
    Iterates over all records of the zone and returns a list of records filtered
    by record type, name and content. The list is empty if no records found.
    """
    records = []
    # zone may be None (e.g. while parsing raw identifiers); then nothing matches.
    rrsets = zone.iterate_rdatasets() if zone else []
    for rname, rdataset in rrsets:
        rtype = dns.rdatatype.to_text(rdataset.rdtype)
        if ((not rdtype or rdtype == rtype)
                and (not name or name == rname.to_text())):
            for rdata in rdataset:
                rdata = rdata.to_text()
                # Compare against the fully qualified content form; the result
                # stores the raw (TXT-unquoted) content.
                if (not content or self._convert_content(rtype, content) == rdata):
                    raw_rdata = self._clean_TXT_record({'type': rtype,
                                                        'content': rdata})['content']
                    data = {
                        'type': rtype,
                        'name': rname.to_text(True),
                        'ttl': int(rdataset.ttl),
                        'content': raw_rdata,
                        # Hetzner has no native record IDs, so a hash of
                        # type/name/content serves as pseudo-identifier.
                        'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
                    }
                    records.append(data)
    return records
def _request(self, action='GET', url='/', data=None, query_params=None):
    """
    Requests to Hetzner by current session and returns the response.

    Raises an HTTP error for 4xx/5xx responses via raise_for_status().
    """
    # Avoid mutable default arguments.
    if data is None:
        data = {}
    if query_params is None:
        query_params = {}
    response = self.session.request(action, self.api[self.account]['endpoint'] + url,
                                    params=query_params, data=data)
    response.raise_for_status()
    return response
###############################################################################
# Provider option helpers
###############################################################################
@staticmethod
def _dns_lookup(name, rdtype, nameservers=None):
    """
    Looks on specified or default system domain nameservers to resolve record type
    & name and returns record set. The record set is empty if no propagated
    record found.
    """
    # Start with an empty rrset so a failed lookup yields an empty result.
    rrset = dns.rrset.from_text(name, 0, 1, rdtype)
    try:
        resolver = dns.resolver.Resolver()
        # Fail fast: cap the total resolution time at one second.
        resolver.lifetime = 1
        if nameservers:
            resolver.nameservers = nameservers
        rrset = resolver.query(name, rdtype)
        for rdata in rrset:
            LOGGER.debug('DNS Lookup => %s %s %s %s',
                         rrset.name.to_text(), dns.rdataclass.to_text(rrset.rdclass),
                         dns.rdatatype.to_text(rrset.rdtype), rdata.to_text())
    except dns.exception.DNSException as error:
        # Lookup errors are expected while a record is not yet propagated;
        # log at debug level only.
        LOGGER.debug('DNS Lookup => %s', error)
    return rrset
@staticmethod
def _get_nameservers(domain):
    """
    Looks for domain nameservers and returns the IPs of the nameservers as a list.
    The list is empty, if no nameservers were found. Needed associated domain zone
    name for lookup.
    """
    nameservers = []
    rdtypes_ns = ['SOA', 'NS']
    rdtypes_ip = ['A', 'AAAA']
    for rdtype_ns in rdtypes_ns:
        for rdata_ns in Provider._dns_lookup(domain, rdtype_ns):
            for rdtype_ip in rdtypes_ip:
                # The first whitespace-separated field of SOA/NS rdata is the
                # nameserver host name; resolve it to its IPs.
                for rdata_ip in Provider._dns_lookup(rdata_ns.to_text().split(' ')[0],
                                                     rdtype_ip):
                    if rdata_ip.to_text() not in nameservers:
                        nameservers.append(rdata_ip.to_text())
    LOGGER.debug('DNS Lookup => %s IN NS %s', domain, ' '.join(nameservers))
    return nameservers
@staticmethod
def _get_dns_cname(name, link=False):
    """
    Looks for associated domain zone, nameservers and linked record name until no
    more linked record name was found for the given fully qualified record name or
    the CNAME lookup was disabled, and then returns the parameters as a tuple.
    """
    # Use a dedicated resolver with a short lifetime for zone discovery.
    resolver = dns.resolver.Resolver()
    resolver.lifetime = 1
    domain = dns.resolver.zone_for_name(name, resolver=resolver).to_text(True)
    nameservers = Provider._get_nameservers(domain)
    cname = None
    # Follow at most max_links chained CNAME records to avoid infinite loops.
    links, max_links = 0, 5
    while link:
        if links >= max_links:
            LOGGER.error('Hetzner => Record %s has more than %d linked CNAME '
                         'records. Reduce the amount of CNAME links!',
                         name, max_links)
            raise AssertionError
        qname = cname if cname else name
        rrset = Provider._dns_lookup(qname, 'CNAME', nameservers)
        if rrset:
            links += 1
            cname = rrset[0].to_text()
            # The CNAME target may live in a different zone; re-resolve the
            # zone and its nameservers in that case.
            qdomain = dns.resolver.zone_for_name(cname, resolver=resolver).to_text(True)
            if domain != qdomain:
                domain = qdomain
                nameservers = Provider._get_nameservers(qdomain)
        else:
            link = False
    if cname:
        LOGGER.info('Hetzner => Record %s has CNAME %s', name, cname)
    return domain, nameservers, cname
def _link_record(self):
    """
    Checks restrictions for use of CNAME lookup and returns a tuple of the
    fully qualified record name to lookup and a boolean, if a CNAME lookup
    should be done or not. The fully qualified record name is empty if no
    record name is specified by this provider.
    """
    action = self._get_lexicon_option('action')
    identifier = self._get_lexicon_option('identifier')
    rdtype = self._get_lexicon_option('type')
    name = (self._fqdn_name(self._get_lexicon_option('name'))
            if self._get_lexicon_option('name') else None)
    link = True if self._get_provider_option('linked') == 'yes' else False
    # qname keeps the name from the --name option before a given identifier
    # may override type & name below.
    qname = name
    if identifier:
        rdtype, name, _ = self._parse_identifier(identifier)
    # CNAME lookup is only enabled for edit actions on A|AAAA|TXT records with
    # a known name, and for updates only if the record name stays the same.
    if action != 'list' and rdtype in ('A', 'AAAA', 'TXT') and name and link:
        if action != 'update' or name == qname or not qname:
            LOGGER.info('Hetzner => Enable CNAME lookup '
                        '(see --linked parameter)')
            return name, True
    LOGGER.info('Hetzner => Disable CNAME lookup '
                '(see --linked parameter)')
    return name, False
def _propagated_record(self, rdtype, name, content, nameservers=None):
    """
    If the publicly propagation check should be done, waits until the domain nameservers
    responses with the propagated record type, name & content and returns a boolean,
    if the publicly propagation was successful or not.
    """
    latency = self._get_provider_option('latency')
    propagated = True if self._get_provider_option('propagated') == 'yes' else False
    if propagated:
        # Poll the nameservers up to max_retry times, sleeping 'latency'
        # seconds between attempts.
        retry, max_retry = 0, 20
        while retry < max_retry:
            for rdata in Provider._dns_lookup(name, rdtype, nameservers):
                if content == rdata.to_text():
                    LOGGER.info('Hetzner => Record %s has %s %s', name, rdtype, content)
                    return True
            retry += 1
            retry_log = (', retry ({}/{}) in {}s...'.format((retry + 1), max_retry, latency)
                         if retry < max_retry else '')
            LOGGER.info('Hetzner => Record is not propagated%s', retry_log)
            time.sleep(latency)
    return False
###############################################################################
# Hetzner API helpers
###############################################################################
@staticmethod
def _filter_dom(dom, filters, last_find_all=False):
    """
    Apply a chain of API filters to a DOM and return the matched subtree.

    :param dom: a BeautifulSoup DOM, or an HTML string that is parsed first
    :param filters: list of dicts with optional 'name' (tag name) and 'attrs'
        keys; each entry narrows the previous match via find()
    :param last_find_all: if True, the final filter uses find_all() so a list
        of matching nodes is returned instead of a single node
    :return: the filtered DOM node (or list); empty/None when a filter
        has no match
    """
    if isinstance(dom, string_types):
        dom = BeautifulSoup(dom, 'html.parser')
    for idx, find in enumerate(filters, start=1):
        if not dom:
            # A previous filter matched nothing; stop filtering early.
            break
        name, attrs = find.get('name'), find.get('attrs', {})
        if len(filters) == idx and last_find_all:
            dom = dom.find_all(name, attrs=attrs) if name else dom.find_all(attrs=attrs)
        else:
            dom = dom.find(name, attrs=attrs) if name else dom.find(attrs=attrs)
    return dom
@staticmethod
def _extract_hidden_data(dom):
"""
Extracts hidden input data from DOM and returns the data as dictionary.
"""
input_tags = dom.find_all('input', attrs={'type': 'hidden'})
data = {}
for input_tag in input_tags:
data[input_tag['name']] = input_tag['value']
return data
@staticmethod
def _extract_domain_id(string, regex):
"""
Extracts domain ID from given string and returns the domain ID.
"""
regex = re.compile(regex)
match = regex.search(string)
if not match:
return False
return str(match.group(1))
@contextmanager
def _session(self, domain, domain_id=None, get_zone=True):
    """
    Generates, authenticates and exits session to Hetzner account, and
    provides tuple of additional needed domain data (domain nameservers,
    zone and linked record name) to public methods. The tuple parameters
    are empty if not existent or specified. Exits session and raises error
    if provider fails during session.
    """
    # Resolve an optional linked (CNAME) record to follow for this operation.
    name, link = self._link_record()
    qdomain, nameservers, cname = Provider._get_dns_cname((name if name else domain+'.'), link)
    qdomain_id, zone = domain_id, None
    self.session = self._auth_session(self.username, self.password)
    try:
        # Re-resolve the domain ID when none was given or when the CNAME
        # target lives in a different domain than the requested one.
        if not domain_id or qdomain != domain:
            qdomain_id = self._get_domain_id(qdomain)
        if qdomain == domain:
            self.domain_id = qdomain_id
        if get_zone:
            zone = self._get_zone(qdomain, qdomain_id)
        yield {'nameservers': nameservers, 'zone': zone, 'cname': cname}
    except Exception as exc:
        # Re-raised unchanged; the finally clause still closes the session.
        raise exc
    finally:
        self._exit_session()
def _auth_session(self, username, password):
    """
    Creates session to Hetzner account, authenticates with given credentials and
    returns the session, if authentication was successful. Otherwise raises error.
    """
    api = self.api[self.account]['auth']
    endpoint = api.get('endpoint', self.api[self.account]['endpoint'])
    session = requests.Session()
    # Retry transient HTTPS failures with exponential backoff.
    session_retries = Retry(total=10, backoff_factor=0.5)
    session_adapter = requests.adapters.HTTPAdapter(max_retries=session_retries)
    session.mount('https://', session_adapter)
    # Fetch the login form to obtain its hidden fields (e.g. CSRF data).
    response = session.request('GET', endpoint + api['GET'].get('url', '/'))
    dom = Provider._filter_dom(response.text, api['filter'])
    data = Provider._extract_hidden_data(dom)
    data[api['user']], data[api['pass']] = username, password
    response = session.request('POST', endpoint + api['POST']['url'], data=data)
    # If the login form is still present after POST, authentication failed.
    if Provider._filter_dom(response.text, api['filter']):
        LOGGER.error('Hetzner => Unable to authenticate session with %s account \'%s\': '
                     'Invalid credentials',
                     self.account, username)
        raise AssertionError
    LOGGER.info('Hetzner => Authenticate session with %s account \'%s\'',
                self.account, username)
    return session
def _exit_session(self):
    """
    Log out of the Hetzner account, drop the session object and return True.
    """
    account_api = self.api[self.account]
    logout_response = self._get(account_api['exit']['GET']['url'])
    # The login form re-appearing in the response means the logout worked.
    still_authenticated = Provider._filter_dom(logout_response.text,
                                               account_api['filter'])
    if still_authenticated:
        LOGGER.warning('Hetzner => Unable to exit session')
    else:
        LOGGER.info('Hetzner => Exit session')
    self.session = None
    return True
def _get_domain_id(self, domain):
    """
    Pulls all domains managed by authenticated Hetzner account, extracts their IDs
    and returns the ID for the current domain, if exists. Otherwise raises error.
    """
    api = self.api[self.account]['domain_id']
    qdomain = dns.name.from_text(domain).to_unicode(True)
    domains, last_count, page = {}, -1, 0
    # Paginate until a page contributes no new domains (last page reached).
    while last_count != len(domains):
        last_count = len(domains)
        page += 1
        # '<index>' is the pagination placeholder in URL and params templates.
        url = (api['GET'].copy()).get('url', '/').replace('<index>', str(page))
        params = api['GET'].get('params', {}).copy()
        for param in params:
            params[param] = params[param].replace('<index>', str(page))
        response = self._get(url, query_params=params)
        domain_tags = Provider._filter_dom(response.text, api['filter'], True)
        for domain_tag in domain_tags:
            # The domain ID is embedded in a tag attribute; extract it by regex.
            domain_id = Provider._extract_domain_id(dict(domain_tag.attrs)[api['id']['attr']],
                                                    api['id']['regex'])
            domain = (Provider._filter_dom(domain_tag, api['domain'])
                      .renderContents().decode('UTF-8'))
            domains[domain] = domain_id
            if domain == qdomain:
                LOGGER.info('Hetzner => Get ID %s for domain %s', domain_id, qdomain)
                return domain_id
    LOGGER.error('Hetzner => ID for domain %s does not exists', qdomain)
    raise AssertionError
def _get_zone(self, domain, domain_id):
    """
    Pulls the zone for the current domain from authenticated Hetzner account and
    returns it as a zone object (dict with parsed 'data' and 'hidden' form fields).
    """
    api = self.api[self.account]
    # Issue each configured GET in order; presumably these navigate the web UI
    # step by step, and only the final response contains the zone form.
    for request in api['zone']['GET']:
        url = (request.copy()).get('url', '/').replace('<id>', domain_id)
        params = request.get('params', {}).copy()
        for param in params:
            params[param] = params[param].replace('<id>', domain_id)
        response = self._get(url, query_params=params)
    dom = Provider._filter_dom(response.text, api['filter'])
    # The zone file is served inside a <textarea> element of the edit form.
    zone_file_filter = [{'name': 'textarea', 'attrs': {'name': api['zone']['file']}}]
    zone_file = Provider._filter_dom(dom, zone_file_filter).renderContents().decode('UTF-8')
    hidden = Provider._extract_hidden_data(dom)
    zone = {'data': dns.zone.from_text(zone_file, origin=domain, relativize=False),
            'hidden': hidden}
    LOGGER.info('Hetzner => Get zone for domain %s', domain)
    return zone
def _post_zone(self, zone):
    """
    Pushes updated zone for current domain to authenticated Hetzner account and
    returns a boolean, if update was successful or not. Furthermore, waits until
    the zone has been taken over, if it is a Hetzner Robot account.
    """
    api = self.api[self.account]['zone']
    # Re-submit the hidden form fields captured by _get_zone together with
    # the serialized zone file.
    data = zone['hidden']
    data[api['file']] = zone['data'].to_text(relativize=True)
    response = self._post(api['POST']['url'], data=data)
    # If the edit form is still present, Hetzner rejected the zone file.
    if Provider._filter_dom(response.text, api['filter']):
        LOGGER.error('Hetzner => Unable to update zone for domain %s: Syntax error\n\n%s',
                     zone['data'].origin.to_unicode(True),
                     zone['data'].to_text(relativize=True).decode('UTF-8'))
        return False
    LOGGER.info('Hetzner => Update zone for domain %s',
                zone['data'].origin.to_unicode(True))
    if self.account == 'robot':
        # Robot accounts apply zone updates asynchronously; give it time.
        latency = self._get_provider_option('latency')
        LOGGER.info('Hetzner => Wait %ds until Hetzner Robot has taken over zone...',
                    latency)
        time.sleep(latency)
    return True
<|code_end|>
|
Memset provider: TypeError: string indices must be integers
Hi,
When using the Memset provider with the default table formatting I get this error:
```bash
$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300
Traceback (most recent call last):
File "/usr/local/bin/lexicon", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 133, in main
handle_output(results, parsed_args.output)
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 109, in handle_output
table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER')
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 75, in generate_table_result
array = [[row['id'], row['type'], row['name'], row['content'], row['ttl']] for row in output]
TypeError: string indices must be integers
```
I think this is because `output` is a string not an array - when I added `print output` I got a string like `969f9caabe19859c11249333dd80aa15`.
When I use `--output JSON` I get the same ID plus quotes:
```bash
$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300 --output JSON
"969f9caabe19859c11249333dd80aa15"
```
I know Memset's not public so if you need any help to test it just let me know. For now I'll work around it with `--output QUIET` since I don't really care about the output here.
Thanks!
Dave
| lexicon/cli.py
<|code_start|>#!/usr/bin/env python
"""Module for Lexicon command-line interface"""
from __future__ import absolute_import, print_function
import json
import logging
import os
import sys
from lexicon.client import Client
from lexicon.config import ConfigResolver
from lexicon.parser import generate_cli_main_parser
logger = logging.getLogger(__name__) # pylint: disable=C0103
def generate_table_result(lexicon_logger, output=None, without_header=None):
    """Convert returned JSON into a nice table for command line usage"""
    # Probe whether the output is iterable at all.
    # NOTE(review): plain strings are iterable, so a bare string result (as
    # returned by some providers) passes this check and then fails below when
    # treated as a list of record dicts — confirm against provider outputs.
    try:
        _ = (entry for entry in output)
    except TypeError:
        lexicon_logger.debug('Command output is not iterable, and then cannot '
                             'be printed with --quiet parameter not enabled.')
        return None
    # One row per record: id / type / name / content / ttl, blank when missing.
    array = [[
        row.get('id', ''),
        row.get('type', ''),
        row.get('name', ''),
        row.get('content', ''),
        row.get('ttl', '')] for row in output]
    # Insert header (insert before calculating the max width of each column
    # to take headers size into account)
    if not without_header:
        headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL']
        array.insert(0, headers)
    column_widths = [0, 0, 0, 0, 0]
    # Find max width for each column
    for row in array:
        for idx, col in enumerate(row):
            width = len(str(col))
            if width > column_widths[idx]:
                column_widths[idx] = width
    # Add a 'nice' separator
    if not without_header:
        array.insert(1, ['-' * column_widths[idx]
                         for idx in range(len(column_widths))])
    # Construct table to be printed: left-justify each cell to its column width.
    table = []
    for row in array:
        row_list = []
        for idx, col in enumerate(row):
            row_list.append(str(col).ljust(column_widths[idx]))
        table.append(' '.join(row_list))
    # Return table
    return '\n'.join(table)
def handle_output(results, output_type):
    """Print the relevant output for given output_type"""
    # QUIET suppresses all output.
    if not output_type == 'QUIET':
        if not output_type == 'JSON':
            # Table output (with or without header row).
            table = generate_table_result(
                logger, results, output_type == 'TABLE-NO-HEADER')
            if table:
                print(table)
        else:
            # JSON output: only print when the result is iterable and serializable.
            try:
                _ = (entry for entry in results)
                json_str = json.dumps(results)
                if json_str:
                    print(json_str)
            except TypeError:
                logger.debug('Output is not a JSON, and then cannot '
                             'be printed with --output=JSON parameter.')
def main():
    """Main function of Lexicon."""
    # Dynamically determine all the providers available and gather command line arguments.
    parsed_args = generate_cli_main_parser().parse_args()
    log_level = logging.getLevelName(parsed_args.log_level)
    logging.basicConfig(stream=sys.stdout, level=log_level,
                        format='%(message)s')
    logger.debug('Arguments: %s', parsed_args)
    # In the CLI context, will get configuration interactively:
    # * from the command line
    # * from the environment variables
    # * from lexicon configuration files in working directory
    config = ConfigResolver()
    config.with_args(parsed_args).with_env().with_config_dir(os.getcwd())
    # Execute the requested action against the resolved provider and print it.
    client = Client(config)
    results = client.execute()
    handle_output(results, parsed_args.output)


if __name__ == '__main__':
    main()
<|code_end|>
| lexicon/cli.py
<|code_start|>#!/usr/bin/env python
"""Module for Lexicon command-line interface"""
from __future__ import absolute_import, print_function
import json
import logging
import os
import sys
from lexicon.client import Client
from lexicon.config import ConfigResolver
from lexicon.parser import generate_cli_main_parser
logger = logging.getLogger(__name__) # pylint: disable=C0103
def generate_list_table_result(lexicon_logger, output=None, without_header=None):
    """
    Render the result of a 'list' action as an aligned plain-text table.

    Returns None (after a debug log) when the output is not a list of
    record dicts; otherwise returns the table as a single string.
    """
    if not isinstance(output, list):
        lexicon_logger.debug('Command output is not a list, and then cannot '
                             'be printed with --quiet parameter not enabled.')
        return None
    columns = ('id', 'type', 'name', 'content', 'ttl')
    rows = [[entry.get(column, '') for column in columns] for entry in output]
    # The header participates in the width computation, so add it first.
    if not without_header:
        rows.insert(0, ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL'])
    widths = [0] * len(columns)
    for row in rows:
        for col_idx, cell in enumerate(row):
            widths[col_idx] = max(widths[col_idx], len(str(cell)))
    # Separator line between the header and the data rows.
    if not without_header:
        rows.insert(1, ['-' * width for width in widths])
    rendered = [
        ' '.join(str(cell).ljust(widths[col_idx]) for col_idx, cell in enumerate(row))
        for row in rows
    ]
    return os.linesep.join(rendered)
def generate_table_results(output=None, without_header=None):
    """
    Render the result of a non-list action (a single scalar value) for
    command line usage, optionally preceded by a 'RESULT' header.
    """
    text = str(output)
    if without_header:
        return text
    # Separator is at least as wide as the header word 'RESULT'.
    lines = ['RESULT', '-' * max(6, len(text)), text]
    return os.linesep.join(lines)
def handle_output(results, output_type, action):
    """
    Print `results` in the requested format: QUIET prints nothing, JSON
    prints a serialized dump, otherwise a table (list actions get a record
    table, other actions a single-result table).
    """
    if output_type == 'QUIET':
        return
    if output_type == 'JSON':
        try:
            json_str = json.dumps(results)
        except TypeError:
            logger.debug('Output is not JSON serializable, and then cannot '
                         'be printed with --output=JSON parameter.')
        else:
            if json_str:
                print(json_str)
    else:
        without_header = output_type == 'TABLE-NO-HEADER'
        if action == 'list':
            table = generate_list_table_result(logger, results, without_header)
        else:
            table = generate_table_results(results, without_header)
        if table:
            print(table)
def main():
    """Main function of Lexicon."""
    # Dynamically determine all the providers available and gather command line arguments.
    parsed_args = generate_cli_main_parser().parse_args()
    log_level = logging.getLevelName(parsed_args.log_level)
    logging.basicConfig(stream=sys.stdout, level=log_level,
                        format='%(message)s')
    logger.debug('Arguments: %s', parsed_args)
    # In the CLI context, will get configuration interactively:
    # * from the command line
    # * from the environment variables
    # * from lexicon configuration files in working directory
    config = ConfigResolver()
    config.with_args(parsed_args).with_env().with_config_dir(os.getcwd())
    # Execute the requested action, then print it; the resolved action decides
    # which table renderer handle_output uses.
    client = Client(config)
    results = client.execute()
    handle_output(results, parsed_args.output, config.resolve('lexicon:action'))


if __name__ == '__main__':
    main()
<|code_end|>
|
Add Hover Provider
Opening issue just to say I'm working on adding Hover to lexicon. I'm working on my fork here: https://github.com/bkanuka/lexicon
I'm working on the tests/recordings and then I'll open a PR
| lexicon/providers/hover.py
<|code_start|><|code_end|>
| lexicon/providers/hover.py
<|code_start|>"""Module provider for Hover"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['hover.com']
def provider_parser(subparser):
    """Configure the CLI subparser with the Hover authentication options."""
    auth_options = (
        ("--auth-username", "specify username for authentication"),
        ("--auth-password", "specify password for authentication"),
    )
    for flag, description in auth_options:
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Provider class for Hover"""

    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Numeric Hover domain ID, resolved during _authenticate.
        self.domain_id = None
        self.api_endpoint = 'https://www.hover.com/api'
        # Session cookies ("hover_session" and "hoverauth") filled by _authenticate.
        self.cookies = {}

    def _authenticate(self):
        """Obtain session cookies and resolve self.domain to its Hover domain ID."""
        # Getting required cookies "hover_session" and "hoverauth"
        response = requests.get("https://www.hover.com/signin")
        self.cookies["hover_session"] = response.cookies['hover_session']
        payload = {"username": self._get_provider_option('auth_username'),
                   "password": self._get_provider_option('auth_password')}
        response = requests.post("https://www.hover.com/signin/auth.json",
                                 json=payload,
                                 cookies=self.cookies)
        response.raise_for_status()
        if "hoverauth" not in response.cookies:
            raise Exception("Unexpected auth response")
        self.cookies["hoverauth"] = response.cookies["hoverauth"]
        # Make sure domain exists
        # domain is stored in self.domain from BaseProvider
        domains = self._list_domains()
        for domain in domains:
            if domain['name'] == self.domain:
                self.domain_id = domain['id']
        if self.domain_id is None:
            raise Exception('Domain {} not found'.format(self.domain))

    def _list_domains(self):
        """Return all domains of the account as dicts with name/id/active keys."""
        response = self._get('/domains')
        domains = []
        for domain in response['domains']:
            processed_domain = {
                'name': domain['domain_name'],
                'id': domain['id'],
                'active': (domain['status'] == "active")
            }
            domains.append(processed_domain)
        LOGGER.debug('list_domains: %s', domains)
        return domains

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        payload = self._get('/domains/{0}/dns'.format(self.domain_id))
        # payload['domains'] should be a list of len 1
        try:
            raw_records = payload['domains'][0]['entries']
        except (KeyError, IndexError):
            raise Exception("Unexpected response")
        processed_records = []
        for record in raw_records:
            processed_record = {
                'type': record['type'],
                'name': self._full_name(record['name']),
                'ttl': record['ttl'],
                'content': record['content'],
                'id': record['id']
            }
            processed_records.append(processed_record)
        # The Hover API has no server-side filters, so filter locally.
        if rtype:
            processed_records = [record for record in processed_records if record['type'] == rtype]
        if name:
            # Substring match on the relative name.
            name = self._relative_name(name)
            processed_records = [record for record in processed_records if name in record['name']]
        if content:
            processed_records = [record for record in processed_records
                                 if record['content'].lower() == content.lower()]
        LOGGER.debug('list_records: %s', processed_records)
        return processed_records

    def _create_record(self, rtype, name, content):
        """Create a record; skip silently when an identical one already exists."""
        name = self._relative_name(name)
        records = self._list_records(rtype, name, content)
        if records:
            LOGGER.debug('not creating duplicate record: %s', records[0])
            return True
        record = {"name": name,
                  "type": rtype,
                  "content": content}
        if self._get_lexicon_option('ttl'):
            record['ttl'] = self._get_lexicon_option('ttl')
        LOGGER.debug('create_record: %s', record)
        payload = self._post("/domains/{0}/dns".format(self.domain_id), record)
        return payload['succeeded']

    # Update a record. Hover cannot update name so we delete and recreate.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        if identifier:
            records = self._list_records()
            records = [r for r in records if r['id'] == identifier]
        else:
            records = self._list_records(rtype, name, None)
        if not records:
            raise Exception("Record not found")
        if len(records) > 1:
            raise Exception("Record not unique")
        orig_record = records[0]
        orig_id = orig_record['id']
        # Fall back to the original record's values for unspecified fields.
        new_rtype = rtype if rtype else orig_record['type']
        new_name = name if name else orig_record['name']
        new_content = content if content else orig_record['content']
        self._delete_record(orig_id)
        return self._create_record(new_rtype, new_name, new_content)

    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        delete_record_ids = []
        if not identifier:
            # Without an identifier, delete every record matching the filters.
            records = self._list_records(rtype, name, content)
            delete_record_ids = [record['id'] for record in records]
        else:
            delete_record_ids.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_ids)
        for record_id in delete_record_ids:
            self._delete("/dns/{0}".format(record_id))
            LOGGER.debug('delete_record: %s', record_id)
        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Perform an authenticated JSON request against the Hover API."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(action, self.api_endpoint + url,
                                    params=query_params,
                                    data=json.dumps(data),
                                    cookies=self.cookies,
                                    headers={'Content-Type': 'application/json'})
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        try:
            return response.json()
        except ValueError:  # response is not json
            raise Exception("Did not get JSON response.")
<|code_end|>
|
Rackspace provider not getting token from the identity API
_Update: solved! See my comment below._
I'm just starting out with lexicon and trying to get it working with Rackspace Cloud DNS. Requests are failing with a `requests.exceptions.HTTPError: 401 Client Error: Resource not found for validate token request` error. With debugging on, it appears lexicon is not requesting a token from `https://identity.api.rackspacecloud.com/v2.0/tokens` before it continues to submit a request to `https://dns.api.rackspacecloud.com/v1.0/…`.
Here's the command I'm running:
lexicon rackspace --log_level DEBUG --auth-username myusername --auth-api-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX list example.com A
And here's the output:
Arguments: Namespace(action='list', auth_account=None, auth_api_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', auth_token=None, auth_username='myusername', content=None, delegated=None, domain='example.com', identifier=None, log_level='DEBUG', name=None, output='TABLE', priority=None, provider_name='rackspace', sleep_time=1, ttl=None, type='A')
Starting new HTTPS connection (1): dns.api.rackspacecloud.com:443
https://dns.api.rackspacecloud.com:443 "GET /v1.0/None/domains?name=example.com HTTP/1.1" 401 0
Traceback (most recent call last):
File "/Users/q/src/dns-lexicon/env/bin/lexicon", line 10, in <module>
sys.exit(main())
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/cli.py", line 117, in main
results = client.execute()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/client.py", line 64, in execute
self.provider.authenticate()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 69, in authenticate
return self._authenticate()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/rackspace.py", line 66, in _authenticate
'name': self.domain
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 142, in _get
return self._request('GET', url, query_params=query_params)
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/lexicon/providers/rackspace.py", line 190, in _request
response.raise_for_status()
File "/Users/q/src/dns-lexicon/env/lib/python3.7/site-packages/requests/models.py", line 940, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 401 Client Error: Resource not found for validate token request for url: https://dns.api.rackspacecloud.com/v1.0/None/domains?name=example.com
Of course, this request will fail if is no token has been received from the `identity.api.rackspacecloud.com` API endpoint.
I'm happy to help troubleshoot, but I thought I'd ask first if I'm using the correct parameters for the rackspace provider.
| lexicon/providers/rackspace.py
<|code_start|>"""Rackspace provider implementation"""
from __future__ import absolute_import
import json
import logging
import time
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['rackspacecloud.com']
def _async_request_completed(payload):
"""Looks into an async response payload to see if the requested job has finished."""
if payload['status'] == 'COMPLETED':
return True
if payload['status'] == 'ERROR':
return True
return False
def provider_parser(subparser):
    """Configure provider parser for Rackspace"""
    string_options = (
        ("--auth-account", "specify account number for authentication"),
        ("--auth-username",
         "specify username for authentication. Only used if --auth-token is empty."),
        ("--auth-api-key",
         "specify api key for authentication. Only used if --auth-token is empty."),
        ("--auth-token",
         "specify token for authentication. "
         "If empty, the username and api key will be used to create a token."),
    )
    for flag, description in string_options:
        subparser.add_argument(flag, help=description)
    subparser.add_argument("--sleep-time", type=float, default=1,
                           help="number of seconds to wait between update requests.")
class Provider(BaseProvider):
    """Provider class for Rackspace"""

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = 'https://dns.api.rackspacecloud.com/v1.0'
        self.auth_api_endpoint = 'https://identity.api.rackspacecloud.com/v2.0'
        # Token obtained from the identity API during _authenticate.
        self._auth_token = None

    def _authenticate(self):
        """Obtain an auth token (if not provided) and resolve the domain ID."""
        self._auth_token = self._get_provider_option('auth_token')
        if not self._auth_token:
            auth_response = self._auth_request('POST', '/tokens', {
                'auth': {
                    'RAX-KSKEY:apiKeyCredentials': {
                        'username': self._get_provider_option('auth_username'),
                        'apiKey': self._get_provider_option('auth_api_key')
                    }
                }
            })
            self._auth_token = auth_response['access']['token']['id']
        payload = self._get('/domains', {
            'name': self.domain
        })
        if not payload['domains']:
            raise Exception('No domain found')
        if len(payload['domains']) > 1:
            raise Exception('Too many domains found. This should not happen')
        self.domain_id = payload['domains'][0]['id']

    # Create record. If record already exists with the same content, do nothing'
    def _create_record(self, rtype, name, content):
        data = {'records': [
            {'type': rtype, 'name': self._full_name(name), 'data': content}]}
        if self._get_lexicon_option('ttl'):
            data['records'][0]['ttl'] = self._get_lexicon_option('ttl')
        try:
            payload = self._post_and_wait(
                '/domains/{0}/records'.format(self.domain_id), data)
        except Exception as error:
            # Duplicate records are treated as an update of the existing one.
            if str(error).startswith('Record is a duplicate of another record'):
                return self._update_record(None, rtype, name, content)
            raise error
        success = len(payload['records']) > 0
        LOGGER.debug('create_record: %s', success)
        return success

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        params = {'per_page': 100}
        if rtype:
            params['type'] = rtype
        if name:
            params['name'] = self._full_name(name)
        # Sending the data filter to the Rackspace DNS API results in a 503 error
        # if content:
        #     params['data'] = content
        payload = self._get(
            '/domains/{0}/records'.format(self.domain_id), params)
        records = list(payload['records'])
        # Content filtering is done client-side (see 503 note above).
        if content:
            records = [
                record for record in records if record['data'] == content]
        records = [{
            'type': record['type'],
            'name': record['name'],
            'ttl': record['ttl'],
            'content': record['data'],
            'id': record['id']
        } for record in records]
        LOGGER.debug('list_records: %s', records)
        return records

    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        data = {}
        if rtype:
            data['type'] = rtype
        if name:
            data['name'] = self._full_name(name)
        if content:
            data['data'] = content
        if self._get_lexicon_option('ttl'):
            data['ttl'] = self._get_lexicon_option('ttl')
        if identifier is None:
            # Look up the record to modify by type and name.
            records = self._list_records(rtype, name)
            if not records:
                raise Exception('Unable to find record to modify: ' + name)
            identifier = records[0]['id']
        self._put_and_wait(
            '/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
        # If it didn't raise from the http status code, then we're good
        LOGGER.debug('update_record: %s', identifier)
        return True

    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        delete_record_id = []
        if not identifier:
            # Without an identifier, delete every record matching the filters.
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete_and_wait(
                '/domains/{0}/records/{1}'.format(self.domain_id, record_id)
            )
        # If it didn't raise from the http status code, then we're good
        success = True
        LOGGER.debug('delete_record: %s', success)
        return success

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Perform an authenticated request against the Rackspace DNS API."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        # NOTE(review): the account number and token are read from the raw
        # provider options here. When _authenticate created the token itself,
        # it stored it only in self._auth_token, so these options are still
        # None — the URL becomes '/v1.0/None/...' and X-Auth-Token is empty,
        # which looks like the 401 reported against this provider. Verify
        # whether self._auth_token should be used instead.
        full_url = (self.api_endpoint +
                    '/{0}' + url).format(self._get_provider_option('auth_account'))
        response = requests.request(action, full_url, params=query_params,
                                    data=json.dumps(data),
                                    headers={
                                        'X-Auth-Token': self._get_provider_option('auth_token'),
                                        'Content-Type': 'application/json'
                                    })
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()

    # Non-GET requests to the Rackspace CloudDNS API are asynchronous
    def _request_and_wait(self, action='POST', url='/', data=None, query_params=None):
        result = self._request(action, url, data, query_params)
        sleep_time = self._get_provider_option('sleep_time') or '1'
        sleep_time = float(sleep_time)
        # Poll the callback URL until the async job reaches a terminal state.
        while not _async_request_completed(result):
            if sleep_time:
                time.sleep(sleep_time)
            result = self._update_response(result)
        if result['status'] == 'ERROR':
            raise Exception(result['error']['details'])
        if 'response' in result:
            return result['response']
        return None

    def _post_and_wait(self, url='/', data=None, query_params=None):
        """POST and block until the async job completes."""
        return self._request_and_wait('POST', url, data, query_params)

    def _put_and_wait(self, url='/', data=None, query_params=None):
        """PUT and block until the async job completes."""
        return self._request_and_wait('PUT', url, data, query_params)

    def _delete_and_wait(self, url='/', data=None, query_params=None):
        """DELETE and block until the async job completes."""
        return self._request_and_wait('DELETE', url, data, query_params)

    def _update_response(self, payload):
        """Re-fetch an async job's status from its callback URL."""
        # NOTE(review): same auth-token concern as in _request above.
        response = requests.request('GET', payload['callbackUrl'], params={'showDetails': 'true'},
                                    data={},
                                    headers={
                                        'X-Auth-Token': self._get_provider_option('auth_token'),
                                        'Content-Type': 'application/json'})
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()

    def _auth_request(self, action='GET', url='/', data=None, query_params=None):
        """Perform an unauthenticated request against the identity API."""
        if data is None:
            data = {}
        response = requests.request(action, self.auth_api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    headers={
                                        'Content-Type': 'application/json'
                                    })
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()
<|code_end|>
| lexicon/providers/rackspace.py
<|code_start|>"""Rackspace provider implementation"""
from __future__ import absolute_import
import json
import logging
import time
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['rackspacecloud.com']
def _async_request_completed(payload):
"""Looks into an async response payload to see if the requested job has finished."""
if payload['status'] == 'COMPLETED':
return True
if payload['status'] == 'ERROR':
return True
return False
def provider_parser(subparser):
    """Configure provider parser for Rackspace"""
    for flag, description in (
            ("--auth-account", "specify account number for authentication"),
            ("--auth-username",
             "specify username for authentication. Only used if --auth-token is empty."),
            ("--auth-api-key",
             "specify api key for authentication. Only used if --auth-token is empty."),
            ("--auth-token",
             "specify token for authentication. "
             "If empty, the username and api key will be used to create a token.")):
        subparser.add_argument(flag, help=description)
    subparser.add_argument("--sleep-time", type=float, default=1,
                           help="number of seconds to wait between update requests.")
class Provider(BaseProvider):
"""Provider class for Rackspace"""
def __init__(self, config):
    super(Provider, self).__init__(config)
    self.domain_id = None
    self.api_endpoint = 'https://dns.api.rackspacecloud.com/v1.0'
    self.auth_api_endpoint = 'https://identity.api.rackspacecloud.com/v2.0'
    # Token and account (tenant) ID resolved by _authenticate; preferred over
    # the raw provider options by _get_rackspace_option.
    self._auth_token = None
    self._auth_account = None
def _get_rackspace_option(self, key):
    """
    Resolve an option, preferring a value cached on the instance
    (attribute '_<key>', e.g. _auth_token set by _authenticate) and
    falling back to the configured provider option.
    """
    private_key = '_' + key
    result = None
    if hasattr(self, private_key):
        result = getattr(self, private_key)
    if result is None:
        result = self._get_provider_option(key)
    return result
def _authenticate(self):
self._auth_token = self._get_provider_option('auth_token')
if not self._auth_token:
auth_response = self._auth_request('POST', '/tokens', {
'auth': {
'RAX-KSKEY:apiKeyCredentials': {
'username': self._get_provider_option('auth_username'),
'apiKey': self._get_provider_option('auth_api_key')
}
}
})
self._auth_token = auth_response['access']['token']['id']
self._auth_account = auth_response['access']['token']['tenant']['id']
payload = self._get('/domains', {
'name': self.domain
})
if not payload['domains']:
raise Exception('No domain found')
if len(payload['domains']) > 1:
raise Exception('Too many domains found. This should not happen')
self.domain_id = payload['domains'][0]['id']
# Create record. If record already exists with the same content, do nothing'
def _create_record(self, rtype, name, content):
data = {'records': [
{'type': rtype, 'name': self._full_name(name), 'data': content}]}
if self._get_lexicon_option('ttl'):
data['records'][0]['ttl'] = self._get_lexicon_option('ttl')
try:
payload = self._post_and_wait(
'/domains/{0}/records'.format(self.domain_id), data)
except Exception as error:
if str(error).startswith('Record is a duplicate of another record'):
return self._update_record(None, rtype, name, content)
raise error
success = len(payload['records']) > 0
LOGGER.debug('create_record: %s', success)
return success
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def _list_records(self, rtype=None, name=None, content=None):
params = {'per_page': 100}
if rtype:
params['type'] = rtype
if name:
params['name'] = self._full_name(name)
# Sending the data filter to the Rackspace DNS API results in a 503 error
# if content:
# params['data'] = content
payload = self._get(
'/domains/{0}/records'.format(self.domain_id), params)
records = list(payload['records'])
if content:
records = [
record for record in records if record['data'] == content]
records = [{
'type': record['type'],
'name': record['name'],
'ttl': record['ttl'],
'content': record['data'],
'id': record['id']
} for record in records]
LOGGER.debug('list_records: %s', records)
return records
# Create or update a record.
def _update_record(self, identifier, rtype=None, name=None, content=None):
data = {}
if rtype:
data['type'] = rtype
if name:
data['name'] = self._full_name(name)
if content:
data['data'] = content
if self._get_lexicon_option('ttl'):
data['ttl'] = self._get_lexicon_option('ttl')
if identifier is None:
records = self._list_records(rtype, name)
if not records:
raise Exception('Unable to find record to modify: ' + name)
identifier = records[0]['id']
self._put_and_wait(
'/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
# If it didn't raise from the http status code, then we're good
LOGGER.debug('update_record: %s', identifier)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
delete_record_id = []
if not identifier:
records = self._list_records(rtype, name, content)
delete_record_id = [record['id'] for record in records]
else:
delete_record_id.append(identifier)
LOGGER.debug('delete_records: %s', delete_record_id)
for record_id in delete_record_id:
self._delete_and_wait(
'/domains/{0}/records/{1}'.format(self.domain_id, record_id)
)
# If it didn't raise from the http status code, then we're good
success = True
LOGGER.debug('delete_record: %s', success)
return success
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
LOGGER.debug('request tenant ID: %s', self._get_rackspace_option('auth_account'))
full_url = (self.api_endpoint +
'/{0}' + url).format(self._get_rackspace_option('auth_account'))
response = requests.request(action, full_url, params=query_params,
data=json.dumps(data),
headers={
'X-Auth-Token': self._get_rackspace_option('auth_token'),
'Content-Type': 'application/json'
})
# if the request fails for any reason, throw an error.
response.raise_for_status()
return response.json()
# Non-GET requests to the Rackspace CloudDNS API are asynchronous
def _request_and_wait(self, action='POST', url='/', data=None, query_params=None):
result = self._request(action, url, data, query_params)
sleep_time = self._get_rackspace_option('sleep_time') or '1'
sleep_time = float(sleep_time)
while not _async_request_completed(result):
if sleep_time:
time.sleep(sleep_time)
result = self._update_response(result)
if result['status'] == 'ERROR':
raise Exception(result['error']['details'])
if 'response' in result:
return result['response']
return None
def _post_and_wait(self, url='/', data=None, query_params=None):
return self._request_and_wait('POST', url, data, query_params)
def _put_and_wait(self, url='/', data=None, query_params=None):
return self._request_and_wait('PUT', url, data, query_params)
def _delete_and_wait(self, url='/', data=None, query_params=None):
return self._request_and_wait('DELETE', url, data, query_params)
def _update_response(self, payload):
response = requests.request('GET', payload['callbackUrl'], params={'showDetails': 'true'},
data={},
headers={
'X-Auth-Token': self._get_rackspace_option('auth_token'),
'Content-Type': 'application/json'})
# if the request fails for any reason, throw an error.
response.raise_for_status()
return response.json()
def _auth_request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
response = requests.request(action, self.auth_api_endpoint + url, params=query_params,
data=json.dumps(data),
headers={
'Content-Type': 'application/json'
})
# if the request fails for any reason, throw an error.
response.raise_for_status()
return response.json()
<|code_end|>
|
DNSimple crash with a team member that does not have a plan
Here's the error I get (I have redacted my account number, domain and auth token)
```sh
$ lexicon dnsimple --auth-token REDACTED list REDACTED A
Traceback (most recent call last):
File "/usr/local/bin/lexicon", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/lexicon/cli.py", line 117, in main
results = client.execute()
File "/usr/local/lib/python2.7/dist-packages/lexicon/client.py", line 64, in execute
self.provider.authenticate()
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/dnsimple.py", line 43, in authenticate
'/{0}/domains'.format(account['id']), query_params={'name_like': self.domain})
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/base.py", line 87, in _get
return self._request('GET', url, query_params=query_params)
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/dnsimple.py", line 189, in _request
r.raise_for_status()
File "/usr/lib/python2.7/dist-packages/requests/models.py", line 935, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 402 Client Error: Payment Required for url: https://api.dnsimple.com/v2/REDACTED/domains?name_like=REDACTED
```
I believe that this happens because of the way the domain I have is configured. We have a domain configured with a team. There is one account that has a professional plan (with our domain) and we have other users as team members of that account. The only account that has a plan is the one mentioned. Other users have personal accounts by default but they don't have a plan configured at all.
I believe that the code responsible is this: https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/dnsimple.py#L37-L52
The dnsimple provider iterates through all accounts that the token has access to and tries to see if each of them has a domain similar to the one specified. When it tries to do this on an account that is not configured, it will crash with the http error `402 Client Error: Payment Required for url: ...`.
This is consistent with the documentation from dnsimple: https://developer.dnsimple.com/v2/#response-codes
> `402 Payment Required` - Your account is not subscribed or not in good standing.
I think that it's possible to figure out if an account is not configured by using the `GET /accounts` endpoint. Accounts that are not configured have their `plan_identifier` set to `null`. This does not seem to be documented in the API reference, though.
When I do this request I get the following json as output
```json
{
"data": [
{
"created_at": "2018-06-05T19:23:59Z",
"email": "REDACTED",
"id": "REDACTED",
"plan_identifier": "dnsimple-professional",
"updated_at": "2019-01-06T21:16:40Z"
},
{
"created_at": "2018-06-08T19:48:59Z",
"email": "REDACTED",
"id": "REDACTED",
"plan_identifier": null,
"updated_at": "2018-06-08T19:48:59Z"
}
]
}
```
| lexicon/providers/dnsimple.py
<|code_start|>"""Module provider for DNS Simple"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['dnsimple.com']
def provider_parser(subparser):
    """Configure provider parser for DNS Simple"""
    # All options are plain string flags, so a (flag, help) table suffices.
    for flag, help_text in (
            ("--auth-token", "specify api token for authentication"),
            ("--auth-username", "specify email address for authentication"),
            ("--auth-password", "specify password for authentication"),
            ("--auth-2fa",
             "specify two-factor auth token (OTP) to use with email/password authentication"),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """Provider class for DNS Simple

    Implements lexicon's CRUD operations on top of the DNSimple v2 API.
    Authentication is either a bearer token or HTTP basic email/password,
    optionally with a one-time 2FA code.
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.account_id = None
        # Allow pointing at a sandbox/alternate endpoint via provider config.
        self.api_endpoint = self._get_provider_option(
            'api_endpoint') or 'https://api.dnsimple.com/v2'
    def _authenticate(self):
        """Resolve ``self.domain`` to its account and domain ids.

        Scans every account the credentials can access.  Accounts without a
        subscribed plan are skipped: listing their domains fails with HTTP
        402 ("Payment Required" per the DNSimple API response codes), which
        would otherwise abort the whole scan.
        """
        payload = self._get('/accounts')
        if not payload[0]['id']:
            raise Exception('No account id found')
        for account in payload:
            # plan_identifier is null for accounts that never selected a plan
            # (e.g. team members who only have a default personal account).
            if account.get('plan_identifier') is None:
                LOGGER.warning(
                    'Skipping unconfigured account %s (%s). '
                    'To use this account, you must select a plan.',
                    account['email'], account['id'])
                continue
            dompayload = self._get(
                '/{0}/domains'.format(account['id']), query_params={'name_like': self.domain})
            if dompayload and dompayload[0]['id']:
                self.account_id = account['id']
                self.domain_id = dompayload[0]['id']
        if not self.account_id:
            raise Exception('No domain found like {}'.format(self.domain))
    # Create record. If record already exists with the same content, do nothing
    def _create_record(self, rtype, name, content):
        """Create a record; return True if an identical one already exists."""
        # check if record already exists
        existing_records = self._list_records(rtype, name, content)
        if len(existing_records) == 1:
            return True
        record = {
            'type': rtype,
            'name': self._relative_name(name),
            'content': content
        }
        if self._get_lexicon_option('ttl'):
            record['ttl'] = self._get_lexicon_option('ttl')
        if self._get_lexicon_option('priority'):
            record['priority'] = self._get_lexicon_option('priority')
        if self._get_provider_option('regions'):
            record['regions'] = self._get_provider_option('regions')
        payload = self._post(
            '/{0}/zones/{1}/records'.format(self.account_id, self.domain), record)
        LOGGER.debug('create_record: %s', 'id' in payload)
        return 'id' in payload
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """Return records matching the filters as lexicon-style dicts."""
        filter_query = {}
        if rtype:
            filter_query['type'] = rtype
        if name:
            filter_query['name'] = self._relative_name(name)
        payload = self._get(
            '/{0}/zones/{1}/records'.format(self.account_id, self.domain),
            query_params=filter_query)
        records = []
        for record in payload:
            # A zone-apex record has an empty name; expand to the bare domain.
            processed_record = {
                'type': record['type'],
                'name': '{}'.format(
                    self.domain) if record['name'] == "" else '{0}.{1}'.format(
                        record['name'],
                        self.domain),
                'ttl': record['ttl'],
                'content': record['content'],
                'id': record['id']}
            if record['priority']:
                processed_record['priority'] = record['priority']
            records.append(processed_record)
        # The API offers no content filter, so filter client-side.
        if content:
            records = [
                record for record in records if record['content'] == content]
        LOGGER.debug('list_records: %s', records)
        return records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Patch record(s); when *identifier* is None, every record matching
        the filters is updated."""
        data = {}
        if identifier is None:
            records = self._list_records(rtype, name, content)
            identifiers = [record["id"] for record in records]
        else:
            identifiers = [identifier]
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['content'] = content
        if self._get_lexicon_option('ttl'):
            data['ttl'] = self._get_lexicon_option('ttl')
        if self._get_lexicon_option('priority'):
            data['priority'] = self._get_lexicon_option('priority')
        if self._get_provider_option('regions'):
            data['regions'] = self._get_provider_option('regions')
        for one_identifier in identifiers:
            self._patch('/{0}/zones/{1}/records/{2}'
                        .format(self.account_id, self.domain, one_identifier), data)
            LOGGER.debug('update_record: %s', one_identifier)
        LOGGER.debug('update_record: %s', True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete by identifier, or every record matching the filters."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete(
                '/{0}/zones/{1}/records/{2}'.format(self.account_id, self.domain, record_id))
        # is always True at this point; if a non 2xx response is returned, an error is raised.
        LOGGER.debug('delete_record: True')
        return True
    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an API request and return the decoded ``data`` payload.

        Raises on non-2xx responses, on missing credentials, and when the
        response body carries a null ``data`` member.
        """
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }
        default_auth = None
        # Bearer token wins; otherwise fall back to basic auth (+ optional OTP).
        if self._get_provider_option('auth_token'):
            default_headers['Authorization'] = "Bearer {0}".format(
                self._get_provider_option('auth_token'))
        elif (self._get_provider_option('auth_username')
              and self._get_provider_option('auth_password')):
            default_auth = (self._get_provider_option(
                'auth_username'), self._get_provider_option('auth_password'))
            if self._get_provider_option('auth_2fa'):
                default_headers['X-Dnsimple-OTP'] = self._get_provider_option(
                    'auth_2fa')
        else:
            raise Exception('No valid authentication mechanism found')
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    headers=default_headers,
                                    auth=default_auth)
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        if response.text and response.json()['data'] is None:
            raise Exception('No data returned')
        # Some responses (e.g. DELETE) have an empty body.
        return response.json()['data'] if response.text else None
    def _patch(self, url='/', data=None, query_params=None):
        """PATCH helper (BaseProvider provides _get/_post/_delete)."""
        return self._request('PATCH', url, data=data, query_params=query_params)
<|code_end|>
| lexicon/providers/dnsimple.py
<|code_start|>"""Module provider for DNS Simple"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['dnsimple.com']
def provider_parser(subparser):
    """Configure provider parser for DNS Simple"""
    add_option = subparser.add_argument
    # Token auth takes precedence over email/password inside the provider.
    add_option("--auth-token", help="specify api token for authentication")
    add_option("--auth-username", help="specify email address for authentication")
    add_option("--auth-password", help="specify password for authentication")
    # One-time password for accounts with two-factor authentication enabled.
    add_option(
        "--auth-2fa",
        help="specify two-factor auth token (OTP) to use with email/password authentication")
class Provider(BaseProvider):
    """Provider class for DNS Simple

    Implements lexicon's CRUD operations on top of the DNSimple v2 API.
    Authentication is either a bearer token or HTTP basic email/password,
    optionally with a one-time 2FA code.
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.account_id = None
        # Allow pointing at a sandbox/alternate endpoint via provider config.
        self.api_endpoint = self._get_provider_option(
            'api_endpoint') or 'https://api.dnsimple.com/v2'
    def _authenticate(self):
        """Resolve ``self.domain`` to its account and domain ids.

        Accounts without a subscribed plan are skipped: listing their domains
        fails with HTTP 402 ("Payment Required").
        """
        payload = self._get('/accounts')
        if not payload[0]['id']:
            raise Exception('No account id found')
        for account in payload:
            if account.get('plan_identifier') is None:
                # Use the module logger (not the root logger) so the message
                # carries this module's name and honours lexicon's logging
                # configuration; %s also tolerates non-integer account ids.
                LOGGER.warning(
                    'Skipping unconfigured account %s (%s). '
                    'To use this account, you must select a plan.',
                    account['email'], account['id'])
                continue
            dompayload = self._get(
                '/{0}/domains'.format(account['id']), query_params={'name_like': self.domain})
            if dompayload and dompayload[0]['id']:
                self.account_id = account['id']
                self.domain_id = dompayload[0]['id']
        if not self.account_id:
            raise Exception('No domain found like {}'.format(self.domain))
    # Create record. If record already exists with the same content, do nothing
    def _create_record(self, rtype, name, content):
        """Create a record; return True if an identical one already exists."""
        # check if record already exists
        existing_records = self._list_records(rtype, name, content)
        if len(existing_records) == 1:
            return True
        record = {
            'type': rtype,
            'name': self._relative_name(name),
            'content': content
        }
        if self._get_lexicon_option('ttl'):
            record['ttl'] = self._get_lexicon_option('ttl')
        if self._get_lexicon_option('priority'):
            record['priority'] = self._get_lexicon_option('priority')
        if self._get_provider_option('regions'):
            record['regions'] = self._get_provider_option('regions')
        payload = self._post(
            '/{0}/zones/{1}/records'.format(self.account_id, self.domain), record)
        LOGGER.debug('create_record: %s', 'id' in payload)
        return 'id' in payload
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """Return records matching the filters as lexicon-style dicts."""
        filter_query = {}
        if rtype:
            filter_query['type'] = rtype
        if name:
            filter_query['name'] = self._relative_name(name)
        payload = self._get(
            '/{0}/zones/{1}/records'.format(self.account_id, self.domain),
            query_params=filter_query)
        records = []
        for record in payload:
            # A zone-apex record has an empty name; expand to the bare domain.
            processed_record = {
                'type': record['type'],
                'name': '{}'.format(
                    self.domain) if record['name'] == "" else '{0}.{1}'.format(
                        record['name'],
                        self.domain),
                'ttl': record['ttl'],
                'content': record['content'],
                'id': record['id']}
            if record['priority']:
                processed_record['priority'] = record['priority']
            records.append(processed_record)
        # The API offers no content filter, so filter client-side.
        if content:
            records = [
                record for record in records if record['content'] == content]
        LOGGER.debug('list_records: %s', records)
        return records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Patch record(s); when *identifier* is None, every record matching
        the filters is updated."""
        data = {}
        if identifier is None:
            records = self._list_records(rtype, name, content)
            identifiers = [record["id"] for record in records]
        else:
            identifiers = [identifier]
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['content'] = content
        if self._get_lexicon_option('ttl'):
            data['ttl'] = self._get_lexicon_option('ttl')
        if self._get_lexicon_option('priority'):
            data['priority'] = self._get_lexicon_option('priority')
        if self._get_provider_option('regions'):
            data['regions'] = self._get_provider_option('regions')
        for one_identifier in identifiers:
            self._patch('/{0}/zones/{1}/records/{2}'
                        .format(self.account_id, self.domain, one_identifier), data)
            LOGGER.debug('update_record: %s', one_identifier)
        LOGGER.debug('update_record: %s', True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete by identifier, or every record matching the filters."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete(
                '/{0}/zones/{1}/records/{2}'.format(self.account_id, self.domain, record_id))
        # is always True at this point; if a non 2xx response is returned, an error is raised.
        LOGGER.debug('delete_record: True')
        return True
    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an API request and return the decoded ``data`` payload.

        Raises on non-2xx responses, on missing credentials, and when the
        response body carries a null ``data`` member.
        """
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }
        default_auth = None
        # Bearer token wins; otherwise fall back to basic auth (+ optional OTP).
        if self._get_provider_option('auth_token'):
            default_headers['Authorization'] = "Bearer {0}".format(
                self._get_provider_option('auth_token'))
        elif (self._get_provider_option('auth_username')
              and self._get_provider_option('auth_password')):
            default_auth = (self._get_provider_option(
                'auth_username'), self._get_provider_option('auth_password'))
            if self._get_provider_option('auth_2fa'):
                default_headers['X-Dnsimple-OTP'] = self._get_provider_option(
                    'auth_2fa')
        else:
            raise Exception('No valid authentication mechanism found')
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    headers=default_headers,
                                    auth=default_auth)
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        if response.text and response.json()['data'] is None:
            raise Exception('No data returned')
        # Some responses (e.g. DELETE) have an empty body.
        return response.json()['data'] if response.text else None
    def _patch(self, url='/', data=None, query_params=None):
        """PATCH helper (BaseProvider provides _get/_post/_delete)."""
        return self._request('PATCH', url, data=data, query_params=query_params)
<|code_end|>
|
PowerDNS dot notation
From PowerDNS 4.2 onwards, undotted notation is not supported.
From PowerDNS >=4.0 but <=4.1 the dotted notation is _optional_.
I suggest we implement the dotted notation _by default_ since that will work with both dotted and undotted versions.
We could have a flag to turn this behavior off for those running legacy versions (which don't support dotted notation), or simply limit support to PowerDNS >=4.0.
| lexicon/providers/powerdns.py
<|code_start|>"""
Lexicon PowerDNS Provider
Author: Will Hughes, 2017
API Docs: https://doc.powerdns.com/md/httpapi/api_spec/
Implementation notes:
* The PowerDNS API does not assign a unique identifier to each record in the way
that Lexicon expects. We work around this by creating an ID based on the record
name, type and content, which when taken together are always unique
* The PowerDNS API has no notion of 'create a single record' or 'delete a single
record'. All operations are either 'replace the RRSet with this new set of records'
or 'delete all records for this name and type. Similarly, there is no notion of
'change the content of this record', because records are identified by their name,
type and content.
* The API is very picky about the format of values used when creating records:
** CNAMEs must be fully qualified
** TXT, LOC records must be quoted
This is why the _clean_content and _unclean_content methods exist, to convert
back and forth between the format PowerDNS expects, and the format Lexicon uses
"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = []
def provider_parser(subparser):
    """Configure provider parser for powerdns"""
    # Every option is a plain string flag, so a (flag, help) table suffices.
    for flag, help_text in (
            ("--auth-token", "specify token for authentication"),
            ("--pdns-server", "URI for PowerDNS server"),
            ("--pdns-server-id", "Server ID to interact with"),
            ("--pdns-disable-notify", "Disable slave notifications from master"),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """Provider class for PowerDNS

    Records have no server-side ids; a synthetic "type/name=content"
    identifier is built client-side, and every mutation is expressed as a
    whole-RRSet REPLACE or DELETE against the zone.
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.api_endpoint = self._get_provider_option('pdns_server')
        # Fixed: the lookup key must use underscores ('pdns_disable_notify'),
        # matching the argparse destination of --pdns-disable-notify and the
        # naming of every other option read in this class; the hyphenated key
        # could never resolve, so the flag was silently ignored.
        self.disable_slave_notify = self._get_provider_option('pdns_disable_notify')
        if self.api_endpoint.endswith('/'):
            self.api_endpoint = self.api_endpoint[:-1]
        if not self.api_endpoint.endswith("/api/v1"):
            self.api_endpoint += "/api/v1"
        self.server_id = self._get_provider_option('pdns_server_id')
        if self.server_id is None:
            self.server_id = 'localhost'
        self.api_endpoint += "/servers/" + self.server_id
        self.api_key = self._get_provider_option('auth_token')
        # 'assert' is stripped under 'python -O'; validate explicitly instead.
        if self.api_key is None:
            raise Exception('auth_token must be specified')
        # Zone payload cache; invalidated after every mutation.
        self._zone_data = None
    def notify_slaves(self):
        """Checks to see if slaves should be notified, and notifies them if needed"""
        if self.disable_slave_notify is not None:
            LOGGER.debug('Slave notifications disabled')
            return False
        if self.zone_data()['kind'] == 'Master':
            response_code = self._put('/zones/' + self.domain + '/notify').status_code
            if response_code == 200:
                LOGGER.debug('Slave(s) notified')
                return True
            LOGGER.debug('Slave notification failed with code %i', response_code)
        else:
            LOGGER.debug('Zone type should be \'Master\' for slave notifications')
        return False
    def zone_data(self):
        """Get zone data"""
        # NOTE(review): PowerDNS >= 4.2 expects the zone name in the URL to
        # carry a trailing dot; confirm the form of self.domain before relying
        # on this against newer servers.
        if self._zone_data is None:
            self._zone_data = self._get('/zones/' + self.domain).json()
        return self._zone_data
    def _authenticate(self):
        """Fetch the zone once to prove credentials and set domain_id."""
        self.zone_data()
        self.domain_id = self.domain
    def _make_identifier(self, rtype, name, content):  # pylint: disable=no-self-use
        """Build the synthetic 'type/name=content' record identifier."""
        return "{}/{}={}".format(rtype, name, content)
    def _parse_identifier(self, identifier):  # pylint: disable=no-self-use
        """Split a synthetic identifier back into (rtype, name, content)."""
        parts = identifier.split('/')
        rtype = parts[0]
        parts = parts[1].split('=')
        name = parts[0]
        # Re-join so content containing '=' round-trips intact.
        content = "=".join(parts[1:])
        return rtype, name, content
    def _list_records(self, rtype=None, name=None, content=None):
        """Return records matching the filters as lexicon-style dicts."""
        records = []
        for rrset in self.zone_data()['rrsets']:
            if (name is None or self._fqdn_name(rrset['name']) == self._fqdn_name(
                    name)) and (rtype is None or rrset['type'] == rtype):
                for record in rrset['records']:
                    if content is None or record['content'] == self._clean_content(rtype, content):
                        records.append({
                            'type': rrset['type'],
                            'name': self._full_name(rrset['name']),
                            'ttl': rrset['ttl'],
                            'content': self._unclean_content(rrset['type'], record['content']),
                            'id': self._make_identifier(rrset['type'],
                                                        rrset['name'], record['content'])
                        })
        LOGGER.debug('list_records: %s', records)
        return records
    def _clean_content(self, rtype, content):
        """Convert lexicon-form content to the exact form PowerDNS expects
        (quoted TXT/LOC, fully-qualified CNAME targets)."""
        if rtype in ("TXT", "LOC"):
            if content[0] != '"':
                content = '"' + content
            if content[-1] != '"':
                content += '"'
        elif rtype == "CNAME":
            content = self._fqdn_name(content)
        return content
    def _unclean_content(self, rtype, content):
        """Inverse of _clean_content: back to lexicon's plain form."""
        if rtype in ("TXT", "LOC"):
            content = content.strip('"')
        elif rtype == "CNAME":
            content = self._full_name(content)
        return content
    def _create_record(self, rtype, name, content):
        """Add a record by REPLACE-ing its whole RRSet with the union of the
        existing records and the new one."""
        rname = self._fqdn_name(name)
        newcontent = self._clean_content(rtype, content)
        updated_data = {
            'name': rname,
            'type': rtype,
            'records': [],
            'ttl': self._get_lexicon_option('ttl') or 600,
            'changetype': 'REPLACE'
        }
        updated_data['records'].append({'content': newcontent, 'disabled': False})
        for rrset in self.zone_data()['rrsets']:
            if rrset['name'] == rname and rrset['type'] == rtype:
                # Keep the RRSet's existing TTL and carry over its other records.
                updated_data['ttl'] = rrset['ttl']
                for record in rrset['records']:
                    if record['content'] != newcontent:
                        updated_data['records'].append(
                            {
                                'content': record['content'],
                                'disabled': record['disabled']
                            })
                break
        request = {'rrsets': [updated_data]}
        LOGGER.debug('request: %s', request)
        self._patch('/zones/' + self.domain, data=request)
        self.notify_slaves()
        self._zone_data = None
        return True
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete matching records; if no matching RRSet exists, do nothing."""
        if identifier is not None:
            rtype, name, content = self._parse_identifier(identifier)
        LOGGER.debug("delete %s %s %s", rtype, name, content)
        if rtype is None or name is None:
            raise Exception("Must specify at least both rtype and name")
        update_data = None
        for rrset in self.zone_data()['rrsets']:
            if rrset['type'] == rtype and self._fqdn_name(rrset['name']) == self._fqdn_name(name):
                update_data = rrset
                if 'comments' in update_data:
                    del update_data['comments']
                if content is None:
                    # No content filter: drop the entire RRSet.
                    update_data['records'] = []
                    update_data['changetype'] = 'DELETE'
                else:
                    # Keep every record except the one being deleted.
                    new_record_list = []
                    for record in update_data['records']:
                        if self._clean_content(rrset['type'], content) != record['content']:
                            new_record_list.append(record)
                    update_data['records'] = new_record_list
                    update_data['changetype'] = 'REPLACE'
                break
        if update_data is None:
            # Fixed: previously a missing RRSet left update_data unbound and
            # raised UnboundLocalError instead of honouring "do nothing".
            LOGGER.debug('delete_record: no matching RRSet for %s %s', rtype, name)
            return True
        request = {'rrsets': [update_data]}
        LOGGER.debug('request: %s', request)
        self._patch('/zones/' + self.domain, data=request)
        self.notify_slaves()
        self._zone_data = None
        return True
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update is implemented as delete-then-create."""
        self._delete_record(identifier)
        return self._create_record(rtype, name, content)
    def _patch(self, url='/', data=None, query_params=None):
        """PATCH helper (BaseProvider provides _get/_put)."""
        return self._request('PATCH', url, data=data, query_params=query_params)
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue an authenticated API request; raise on non-2xx status.

        Unlike most providers this returns the raw response object, since
        some callers need status_code and others need .json().
        """
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    headers={
                                        'X-API-Key': self.api_key,
                                        'Content-Type': 'application/json'
                                    })
        LOGGER.debug('response: %s', response.text)
        response.raise_for_status()
        return response
<|code_end|>
| lexicon/providers/powerdns.py
<|code_start|>"""
Lexicon PowerDNS Provider
Author: Will Hughes, 2017
API Docs: https://doc.powerdns.com/md/httpapi/api_spec/
Implementation notes:
* The PowerDNS API does not assign a unique identifier to each record in the way
that Lexicon expects. We work around this by creating an ID based on the record
name, type and content, which when taken together are always unique
* The PowerDNS API has no notion of 'create a single record' or 'delete a single
record'. All operations are either 'replace the RRSet with this new set of records'
or 'delete all records for this name and type. Similarly, there is no notion of
'change the content of this record', because records are identified by their name,
type and content.
* The API is very picky about the format of values used when creating records:
** CNAMEs must be fully qualified
** TXT, LOC records must be quoted
This is why the _clean_content and _unclean_content methods exist, to convert
back and forth between the format PowerDNS expects, and the format Lexicon uses
"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = []
def provider_parser(subparser):
    """Configure provider parser for powerdns"""
    add_option = subparser.add_argument
    add_option("--auth-token", help="specify token for authentication")
    add_option("--pdns-server", help="URI for PowerDNS server")
    add_option("--pdns-server-id", help="Server ID to interact with")
    add_option("--pdns-disable-notify", help="Disable slave notifications from master")
class Provider(BaseProvider):
    """Provider class for PowerDNS.

    Drives the PowerDNS REST API (api/v1).  PowerDNS manages records that
    share a name and type as a single RRset, so every mutation here is
    expressed as an RRset-level 'REPLACE'/'DELETE' changetype PATCHed to
    the zone endpoint.
    """

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.api_endpoint = self._get_provider_option('pdns_server')
        # NOTE(review): the CLI flag is --pdns-disable-notify; other options
        # here are looked up under underscore keys (pdns_server,
        # pdns_server_id) — confirm this hyphenated key is ever populated.
        self.disable_slave_notify = self._get_provider_option('pdns-disable-notify')
        # Normalize the endpoint: drop a trailing slash, then make sure it
        # ends with "/api/v1" exactly once, however the user supplied it.
        if self.api_endpoint.endswith('/'):
            self.api_endpoint = self.api_endpoint[:-1]
        if not self.api_endpoint.endswith("/api/v1"):
            self.api_endpoint += "/api/v1"
        self.server_id = self._get_provider_option('pdns_server_id')
        if self.server_id is None:
            self.server_id = 'localhost'
        self.api_endpoint += "/servers/" + self.server_id
        self.api_key = self._get_provider_option('auth_token')
        assert self.api_key is not None
        # Lazy cache of the zone JSON; filled on first zone_data() call and
        # invalidated (set back to None) after each mutating request.
        self._zone_data = None

    def notify_slaves(self):
        """Checks to see if slaves should be notified, and notifies them if needed"""
        # Any non-None value of the disable flag suppresses notifications.
        if self.disable_slave_notify is not None:
            LOGGER.debug('Slave notifications disabled')
            return False
        if self.zone_data()['kind'] == 'Master':
            response_code = self._put('/zones/' + self.domain + '/notify').status_code
            if response_code == 200:
                LOGGER.debug('Slave(s) notified')
                return True
            LOGGER.debug('Slave notification failed with code %i', response_code)
        else:
            LOGGER.debug('Zone type should be \'Master\' for slave notifications')
        return False

    def zone_data(self):
        """Get zone data (cached after the first fetch)."""
        if self._zone_data is None:
            self._zone_data = self._get('/zones/' + self._ensure_dot(self.domain)).json()
        return self._zone_data

    def _authenticate(self):
        # Fetching the zone both validates the credentials and primes the
        # zone cache for subsequent calls.
        self.zone_data()
        self.domain_id = self.domain

    def _make_identifier(self, rtype, name, content):  # pylint: disable=no-self-use
        # Synthetic record identifier, format: "TYPE/name=content".
        return "{}/{}={}".format(rtype, name, content)

    def _parse_identifier(self, identifier):  # pylint: disable=no-self-use
        # Inverse of _make_identifier.  The content part may itself contain
        # '=' characters, hence the re-join of the tail parts.
        parts = identifier.split('/')
        rtype = parts[0]
        parts = parts[1].split('=')
        name = parts[0]
        content = "=".join(parts[1:])
        return rtype, name, content

    def _list_records(self, rtype=None, name=None, content=None):
        # Walk every RRset of the cached zone and flatten matching entries
        # into one dict per record value.  All filters are optional.
        records = []
        for rrset in self.zone_data()['rrsets']:
            if (name is None or self._fqdn_name(rrset['name']) == self._fqdn_name(
                    name)) and (rtype is None or rrset['type'] == rtype):
                for record in rrset['records']:
                    if content is None or record['content'] == self._clean_content(rtype, content):
                        records.append({
                            'type': rrset['type'],
                            'name': self._full_name(rrset['name']),
                            'ttl': rrset['ttl'],
                            'content': self._unclean_content(rrset['type'], record['content']),
                            'id': self._make_identifier(rrset['type'],
                                                        rrset['name'], record['content'])
                        })
        LOGGER.debug('list_records: %s', records)
        return records

    def _clean_content(self, rtype, content):
        # Convert user-supplied content to the form PowerDNS stores:
        # TXT/LOC values are double-quoted, CNAME targets are FQDNs.
        if rtype in ("TXT", "LOC"):
            if content[0] != '"':
                content = '"' + content
            if content[-1] != '"':
                content += '"'
        elif rtype == "CNAME":
            content = self._fqdn_name(content)
        return content

    def _unclean_content(self, rtype, content):
        # Inverse of _clean_content, for values read back from the API.
        if rtype in ("TXT", "LOC"):
            content = content.strip('"')
        elif rtype == "CNAME":
            content = self._full_name(content)
        return content

    def _create_record(self, rtype, name, content):
        # PowerDNS replaces a whole RRset at a time, so carry over any
        # existing values (and the existing TTL) alongside the new one.
        rname = self._fqdn_name(name)
        newcontent = self._clean_content(rtype, content)
        updated_data = {
            'name': rname,
            'type': rtype,
            'records': [],
            'ttl': self._get_lexicon_option('ttl') or 600,
            'changetype': 'REPLACE'
        }
        updated_data['records'].append({'content': newcontent, 'disabled': False})
        for rrset in self.zone_data()['rrsets']:
            if rrset['name'] == rname and rrset['type'] == rtype:
                updated_data['ttl'] = rrset['ttl']
                for record in rrset['records']:
                    # Skip an identical value so creates are idempotent.
                    if record['content'] != newcontent:
                        updated_data['records'].append(
                            {
                                'content': record['content'],
                                'disabled': record['disabled']
                            })
                break
        request = {'rrsets': [updated_data]}
        LOGGER.debug('request: %s', request)
        self._patch('/zones/' + self._ensure_dot(self.domain), data=request)
        self.notify_slaves()
        # Invalidate the zone cache — it no longer reflects the server state.
        self._zone_data = None
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        if identifier is not None:
            rtype, name, content = self._parse_identifier(identifier)
        LOGGER.debug("delete %s %s %s", rtype, name, content)
        if rtype is None or name is None:
            raise Exception("Must specify at least both rtype and name")
        for rrset in self.zone_data()['rrsets']:
            if rrset['type'] == rtype and self._fqdn_name(rrset['name']) == self._fqdn_name(name):
                update_data = rrset
                # The API rejects payloads carrying the 'comments' field.
                if 'comments' in update_data:
                    del update_data['comments']
                if content is None:
                    # No content filter: drop the whole RRset.
                    update_data['records'] = []
                    update_data['changetype'] = 'DELETE'
                else:
                    # Keep every value except the one being deleted and
                    # REPLACE the RRset with the remainder.
                    new_record_list = []
                    for record in update_data['records']:
                        if self._clean_content(rrset['type'], content) != record['content']:
                            new_record_list.append(record)
                    update_data['records'] = new_record_list
                    update_data['changetype'] = 'REPLACE'
                break
        request = {'rrsets': [update_data]}
        LOGGER.debug('request: %s', request)
        self._patch('/zones/' + self._ensure_dot(self.domain), data=request)
        self.notify_slaves()
        self._zone_data = None
        return True

    def _update_record(self, identifier, rtype=None, name=None, content=None):
        # Implemented as delete-then-create; the identifier addresses the
        # old record, rtype/name/content describe the new one.
        self._delete_record(identifier)
        return self._create_record(rtype, name, content)

    def _patch(self, url='/', data=None, query_params=None):
        return self._request('PATCH', url, data=data, query_params=query_params)

    def _request(self, action='GET', url='/', data=None, query_params=None):
        # Low-level HTTP helper: JSON body, API-key header, raises on any
        # non-2xx response.
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    headers={
                                        'X-API-Key': self.api_key,
                                        'Content-Type': 'application/json'
                                    })
        LOGGER.debug('response: %s', response.text)
        response.raise_for_status()
        return response

    @classmethod
    def _ensure_dot(cls, text):
        """
        This function makes sure a string contains a dot at the end
        """
        if text.endswith("."):
            return text
        return text + "."
<|code_end|>
|
gandi: updating a record deletes all records of the same rtype
When using the Gandi provider, if you update a specific record with a specific type, all records with a matching name will be deleted.
The if-condition handling this case is not correct.
| lexicon/providers/gandi.py
<|code_start|>"""Provide support to Lexicon for Gandi LiveDNS and Gandi XMLRPC changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
Gandi introduced the LiveDNS API (http://doc.livedns.gandi.net/) in 2017.
It is the successor of the traditional XMLRPC API, which suffered from
long delays between API-based changes and their activation.
The LiveDNS API has one significant peculiarity: DNS records with the
same name and type are managed as one unit. Thus records cannot be
addressed distinctly, which complicates removal of records significantly.
The Gandi XMLRPC API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
from __future__ import absolute_import
import json
import logging
from builtins import object
import requests
from lexicon.providers.base import Provider as BaseProvider
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['gandi.net']
def provider_parser(subparser):
    """Specify arguments for Gandi Lexicon Provider."""
    # (flag, help) pairs for the supported CLI options.
    for flag, help_text in (
            ('--auth-token', "specify Gandi API key"),
            ('--api-protocol',
             "(optional) specify Gandi API protocol to use: rpc (default) or rest"),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """Provide Gandi LiveDNS API implementation of Lexicon Provider interface.

    Note that this implementation delegates its calls to GandiRPCSubProvider
    if the RPC protocol is used.
    """

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.default_ttl = 3600
        # 'rpc' (legacy XMLRPC API) is the default; 'rest' selects LiveDNS.
        self.protocol = self._get_provider_option('api_protocol') or 'rpc'
        if self.protocol != 'rpc' and self.protocol != 'rest':
            raise ValueError(
                "Invalid API protocol specified, should be 'rpc' or 'rest'")
        if self.protocol == 'rpc':
            self.rpc_helper = GandiRPCSubProvider(self._get_provider_option('auth_token'),
                                                  'https://rpc.gandi.net/xmlrpc/',
                                                  self.domain.lower(),
                                                  self._relative_name,
                                                  self._full_name)
        else:
            self.api_endpoint = 'https://dns.api.gandi.net/api/v5'

    def _authenticate(self):
        if self.protocol == 'rpc':
            domain_id = self.rpc_helper.authenticate()
            self.domain_id = domain_id
        else:
            # A successful GET proves the key is valid and the domain exists.
            self._get('/domains/{0}'.format(self.domain))
            self.domain_id = self.domain.lower()

    def _create_record(self, rtype, name, content):
        """Create a record; a no-op if the exact value already exists."""
        if self.protocol == 'rpc':
            return self.rpc_helper.create_record(rtype, self._relative_name(
                name), content, self._get_lexicon_option('ttl') or self.default_ttl)
        # LiveDNS manages same-name/same-type records as one rrset, so the
        # new value must be merged with the existing ones.
        current_values = [record['content']
                          for record in self._list_records(rtype=rtype, name=name)]
        if current_values != [content]:
            # a change is necessary
            url = '/domains/{0}/records/{1}/{2}'.format(
                self.domain_id, self._relative_name(name), rtype)
            if current_values:
                record = {'rrset_values': current_values + [content]}
                self._put(url, record)
            else:
                record = {'rrset_values': [content]}
                # add the ttl, if this is a new record
                if self._get_lexicon_option('ttl'):
                    record['rrset_ttl'] = self._get_lexicon_option('ttl')
                self._post(url, record)
        LOGGER.debug('create_record: %s', True)
        return True

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List all records for the domain in the active Gandi zone."""
        if self.protocol == 'rpc':
            return self.rpc_helper.list_records(rtype, name, content)
        try:
            # Use the most specific endpoint the filters allow; rtype-only
            # filtering has to happen client-side.
            if name is not None:
                if rtype is not None:
                    query_results = [self._get(
                        '/domains/{0}/records/{1}/{2}'
                        .format(self.domain_id, self._relative_name(name), rtype))]
                else:
                    query_results = self._get('/domains/{0}/records/{1}'
                                              .format(self.domain_id, self._relative_name(name)))
            else:
                query_results = self._get(
                    '/domains/{0}/records'.format(self.domain_id))
                if rtype is not None:
                    query_results = [
                        item for item in query_results if item['rrset_type'] == rtype]
        except requests.exceptions.HTTPError as error:
            # 404 simply means "no such record set".
            if error.response.status_code == 404:
                query_results = []
            else:
                raise
        # convert records with multiple values into single-value records
        records = []
        for query_result in query_results:
            for value in query_result['rrset_values']:
                record = {
                    'type': query_result['rrset_type'],
                    'name': self._full_name(query_result['rrset_name']),
                    'ttl': query_result['rrset_ttl'],
                    'content': value,
                    'id': query_result['rrset_name'],
                }
                # cleanup potential quoting if suitable
                self._clean_TXT_record(record)
                records.append(record)
        # filter for content, if requested
        if content is not None:
            records = [
                record for record in records if record['content'] == content]
        LOGGER.debug('list_records: %s', records)
        return records

    # Update a record. Identifier must be specified.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Updates the specified record in a new Gandi zone.

        'content' should be a string or a list of strings.
        """
        if self.protocol == 'rpc':
            return self.rpc_helper.update_record(identifier, rtype, name, content)
        data = {}
        if rtype:
            data['rrset_type'] = rtype
        if name:
            data['rrset_name'] = self._relative_name(name)
        if content:
            if isinstance(content, (list, tuple, set)):
                data['rrset_values'] = list(content)
            else:
                data['rrset_values'] = [content]
        # BUGFIX: this condition used to be inverted ('if rtype is None'),
        # which sent the PUT to the name-only endpoint whenever an rtype WAS
        # supplied — replacing every record sharing that name.
        if rtype is not None:
            # replace only the records of the specified rtype
            url = '/domains/{0}/records/{1}/{2}'.format(self.domain_id,
                                                        identifier or self._relative_name(
                                                            name),
                                                        rtype)
            self._put(url, data)
        else:
            # replace all records with a matching name
            url = '/domains/{0}/records/{1}'.format(self.domain_id,
                                                    identifier or self._relative_name(name))
            self._put(url, {'items': [data]})
        LOGGER.debug('update_record: %s', True)
        return True

    # Delete existing records.
    # If records do not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        if self.protocol == 'rpc':
            return self.rpc_helper.delete_record(identifier, rtype, name, content)
        if not identifier:
            remove_count = 0
            # get all matching (by rtype and name) records - ignore 'content' for now
            records = self._list_records(rtype=rtype, name=name)
            for current_type in set(record['type'] for record in records):
                matching_records = [
                    record for record in records if record['type'] == current_type]
                # collect all non-matching values
                if content is None:
                    remaining_values = []
                else:
                    remaining_values = [record['content'] for record in matching_records
                                        if record['content'] != content]
                url = '/domains/{0}/records/{1}/{2}'.format(
                    self.domain_id, self._relative_name(name), current_type)
                if len(matching_records) == len(remaining_values):
                    # no matching item should be removed for this rtype
                    pass
                elif remaining_values:
                    # reduce the list of values
                    self._put(url, {'rrset_values': remaining_values})
                    remove_count += 1
                else:
                    # remove the complete record (possibly with multiple values)
                    self._delete(url)
                    remove_count += 1
            if remove_count == 0:
                raise Exception('Record identifier could not be found.')
        else:
            self._delete(
                '/domains/{0}/records/{1}'.format(self.domain_id, identifier))
        # is always True at this point, if a non 200 response is returned an error is raised.
        LOGGER.debug('delete_record: %s', True)
        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Issue one LiveDNS HTTP request; raises on any non-2xx response."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'X-Api-Key': self._get_provider_option('auth_token')
        }
        if not url.startswith(self.api_endpoint):
            url = self.api_endpoint + url
        response = requests.request(action, url, params=query_params,
                                    data=json.dumps(data),
                                    headers=default_headers)
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        # DELETE responses have no JSON body.
        if action == 'DELETE':
            return ''
        return response.json()
class GandiRPCSubProvider(object):  # pylint: disable=useless-object-inheritance
    """Provide Gandi RPCXML API implementation of Lexicon Provider interface.

    This implementation is called through the main LiveDNS implementation
    when the RPC protocol is used.
    """

    def __init__(self, api_key, api_endpoint, domain, relative_name_fn, full_name_fn):  # pylint: disable=too-many-arguments
        """Initialize Gandi RCPXML API provider."""
        super(GandiRPCSubProvider, self).__init__()
        self._api_endpoint = api_endpoint
        self._api_key = api_key
        self._domain = domain
        # Name-normalization helpers supplied by the owning Provider.
        self._relative_name = relative_name_fn
        self._full_name = full_name_fn
        self._api = xmlrpclib.ServerProxy(self._api_endpoint, allow_none=True)
        self._zone_id = None

    # Authenticate against provider,
    # Make any requests required to get the domain's id for this provider,
    # so it can be used in subsequent calls. Should throw an error if
    # authentication fails for any reason, or if the domain does not exist.
    def authenticate(self):
        """Determine the current domain and zone IDs for the domain."""
        try:
            payload = self._api.domain.info(self._api_key, self._domain)
            self._zone_id = payload['zone_id']
            return payload['id']
        except xmlrpclib.Fault as err:
            raise Exception("Failed to authenticate: '{0}'".format(err))

    # Create record. If record already exists with the same content, do nothing.
    def create_record(self, rtype, name, content, ttl):
        """Creates a record for the domain in a new Gandi zone."""
        version = None
        ret = False
        # This isn't quite "do nothing" if the record already exists.
        # In this case, no new record will be created, but a new zone version
        # will be created and set.
        try:
            version = self._api.domain.zone.version.new(
                self._api_key, self._zone_id)
            self._api.domain.zone.record.add(self._api_key, self._zone_id, version,
                                             {'type': rtype.upper(),
                                              'name': name,
                                              'value': content,
                                              'ttl': ttl
                                              })
            self._api.domain.zone.version.set(
                self._api_key, self._zone_id, version)
            ret = True
        finally:
            # Roll back the new zone version if anything failed.
            if not ret and version is not None:
                self._api.domain.zone.version.delete(
                    self._api_key, self._zone_id, version)
        LOGGER.debug("create_record: %s", ret)
        return ret

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, rtype=None, name=None, content=None):
        """List all records for the domain in the active Gandi zone."""
        opts = {}
        if rtype is not None:
            opts['type'] = rtype.upper()
        if name is not None:
            opts['name'] = self._relative_name(name)
        if content is not None:
            # TXT values are stored quoted/escaped, so encode before matching.
            opts['value'] = self._txt_encode(content) if opts.get(
                'type', '') == 'TXT' else content
        records = []
        payload = self._api.domain.zone.record.list(
            self._api_key, self._zone_id, 0, opts)
        for record in payload:
            processed_record = {
                'type': record['type'],
                'name': self._full_name(record['name']),
                'ttl': record['ttl'],
                'content': record['value'],
                'id': record['id']
            }
            # Gandi will add quotes to all TXT record strings
            if processed_record['type'] == 'TXT':
                processed_record['content'] = self._txt_decode(
                    processed_record['content'])
            records.append(processed_record)
        LOGGER.debug("list_records: %s", records)
        return records

    # Update a record. Identifier or type+name+content
    def update_record(self, identifier, rtype=None, name=None, content=None):  # pylint: disable=too-many-branches
        """Updates the specified record in a new Gandi zone."""
        if not identifier:
            records = self.list_records(rtype, name)
            if len(records) == 1:
                identifier = records[0]['id']
            elif len(records) > 1:
                raise Exception('Several record identifiers match the request')
            else:
                raise Exception('Record identifier could not be found')
        identifier = str(identifier)
        version = None
        # BUGFIX: 'ret' must be initialized up front.  Previously, if the
        # identifier lookup below matched zero (or several) records, or the
        # version creation raised before 'ret = True', the trailing
        # LOGGER.debug/return (and the 'finally' guard) hit an
        # UnboundLocalError instead of reporting failure.
        ret = False
        # Gandi doesn't allow you to edit records on the active zone file.
        # Gandi also doesn't persist zone record identifiers when creating
        # a new zone file. To update by identifier, we lookup the record
        # by identifier, then use the record fields to find the record in
        # the newly created zone.
        records = self._api.domain.zone.record.list(
            self._api_key, self._zone_id, 0, {'id': identifier})
        if len(records) == 1:
            rec = records[0]
            del rec['id']
            try:
                version = self._api.domain.zone.version.new(
                    self._api_key, self._zone_id)
                records = self._api.domain.zone.record.list(
                    self._api_key, self._zone_id, version, rec)
                if len(records) != 1:
                    raise self.GandiInternalError("expected one record")
                if rtype is not None:
                    rec['type'] = rtype.upper()
                if name is not None:
                    rec['name'] = self._relative_name(name)
                if content is not None:
                    rec['value'] = self._txt_encode(
                        content) if rec['type'] == 'TXT' else content
                records = self._api.domain.zone.record.update(
                    self._api_key, self._zone_id, version, {'id': records[0]['id']}, rec)
                if len(records) != 1:
                    raise self.GandiInternalError(
                        "Expected one updated record")
                self._api.domain.zone.version.set(
                    self._api_key, self._zone_id, version)
                ret = True
            except self.GandiInternalError:
                pass
            finally:
                # Roll back the new zone version if the update didn't land.
                if not ret and version is not None:
                    self._api.domain.zone.version.delete(
                        self._api_key, self._zone_id, version)
        LOGGER.debug("update_record: %s", ret)
        return ret

    # Delete existing records.
    # If records do not exist, do nothing.
    # If an identifier is specified, use it, otherwise do a lookup using type, name and content.
    def delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Removes the specified records in a new Gandi zone."""
        version = None
        ret = False
        opts = {}
        if identifier is not None:
            opts['id'] = identifier
        else:
            if not rtype and not name and not content:
                raise ValueError(
                    'Error, at least one parameter from type, name or content must be set')
            if rtype:
                opts['type'] = rtype.upper()
            if name:
                opts['name'] = self._relative_name(name)
            if content:
                opts['value'] = self._txt_encode(
                    content) if opts['type'] == 'TXT' else content
        records = self._api.domain.zone.record.list(
            self._api_key, self._zone_id, 0, opts)
        if records:
            try:
                version = self._api.domain.zone.version.new(
                    self._api_key, self._zone_id)
                for record in records:
                    # Record ids are not stable across zone versions; match
                    # by the remaining fields instead.
                    del record['id']
                    self._api.domain.zone.record.delete(
                        self._api_key, self._zone_id, version, record)
                self._api.domain.zone.version.set(
                    self._api_key, self._zone_id, version)
                ret = True
            finally:
                if not ret and version is not None:
                    self._api.domain.zone.version.delete(
                        self._api_key, self._zone_id, version)
        LOGGER.debug("delete_record: %s", ret)
        return ret

    @staticmethod
    def _txt_encode(val):
        # Quote and escape a TXT value the way Gandi stores it; returns None
        # for empty input.
        if not val:
            return None
        return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])

    @staticmethod
    def _txt_decode(val):
        # Inverse of _txt_encode: strip the surrounding quotes and unescape.
        if not val:
            return None
        if len(val) > 1 and val[0:1] == '"':
            val = val[1:-1].replace('" "', '').replace('\\"',
                                                       '"').replace('\\\\', '\\')
        return val

    # This exception is for cleaner handling of internal errors
    # within the Gandi provider codebase
    class GandiInternalError(Exception):
        """Internal exception handling class for Gandi management errors"""
<|code_end|>
| lexicon/providers/gandi.py
<|code_start|>"""Provide support to Lexicon for Gandi LiveDNS and Gandi XMLRPC changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
Gandi introduced the LiveDNS API (http://doc.livedns.gandi.net/) in 2017.
It is the successor of the traditional XMLRPC API, which suffered from
long delays between API-based changes and their activation.
The LiveDNS API has one significant peculiarity: DNS records with the
same name and type are managed as one unit. Thus records cannot be
addressed distinctly, which complicates removal of records significantly.
The Gandi XMLRPC API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
from __future__ import absolute_import
import json
import logging
from builtins import object
import requests
from lexicon.providers.base import Provider as BaseProvider
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['gandi.net']
def provider_parser(subparser):
    """Specify arguments for Gandi Lexicon Provider."""
    # Register each supported CLI option with its help string.
    option_specs = [
        ('--auth-token', "specify Gandi API key"),
        ('--api-protocol',
         "(optional) specify Gandi API protocol to use: rpc (default) or rest"),
    ]
    for option_flag, option_help in option_specs:
        subparser.add_argument(option_flag, help=option_help)
class Provider(BaseProvider):
    """Provide Gandi LiveDNS API implementation of Lexicon Provider interface.

    Note that this implementation will delegates its call to GandiRPCSubProvider
    if RPC protocol is used.
    """

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.default_ttl = 3600
        # 'rpc' (legacy XMLRPC API) is the default; 'rest' selects LiveDNS.
        self.protocol = self._get_provider_option('api_protocol') or 'rpc'
        if self.protocol != 'rpc' and self.protocol != 'rest':
            raise ValueError(
                "Invalid API protocol specified, should be 'rpc' or 'rest'")
        if self.protocol == 'rpc':
            self.rpc_helper = GandiRPCSubProvider(self._get_provider_option('auth_token'),
                                                  'https://rpc.gandi.net/xmlrpc/',
                                                  self.domain.lower(),
                                                  self._relative_name,
                                                  self._full_name)
        else:
            self.api_endpoint = 'https://dns.api.gandi.net/api/v5'

    def _authenticate(self):
        if self.protocol == 'rpc':
            domain_id = self.rpc_helper.authenticate()
            self.domain_id = domain_id
        else:
            # A successful GET proves the key is valid and the domain exists.
            self._get('/domains/{0}'.format(self.domain))
            self.domain_id = self.domain.lower()

    def _create_record(self, rtype, name, content):
        # Create a record; a no-op if the exact value already exists.
        if self.protocol == 'rpc':
            return self.rpc_helper.create_record(rtype, self._relative_name(
                name), content, self._get_lexicon_option('ttl') or self.default_ttl)
        # LiveDNS manages same-name/same-type records as one rrset, so the
        # new value must be merged with the existing ones.
        current_values = [record['content']
                          for record in self._list_records(rtype=rtype, name=name)]
        if current_values != [content]:
            # a change is necessary
            url = '/domains/{0}/records/{1}/{2}'.format(
                self.domain_id, self._relative_name(name), rtype)
            if current_values:
                record = {'rrset_values': current_values + [content]}
                self._put(url, record)
            else:
                record = {'rrset_values': [content]}
                # add the ttl, if this is a new record
                if self._get_lexicon_option('ttl'):
                    record['rrset_ttl'] = self._get_lexicon_option('ttl')
                self._post(url, record)
        LOGGER.debug('create_record: %s', True)
        return True

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List all record for the domain in the active Gandi zone."""
        if self.protocol == 'rpc':
            return self.rpc_helper.list_records(rtype, name, content)
        try:
            # Use the most specific endpoint the filters allow; rtype-only
            # filtering has to happen client-side.
            if name is not None:
                if rtype is not None:
                    query_results = [self._get(
                        '/domains/{0}/records/{1}/{2}'
                        .format(self.domain_id, self._relative_name(name), rtype))]
                else:
                    query_results = self._get('/domains/{0}/records/{1}'
                                              .format(self.domain_id, self._relative_name(name)))
            else:
                query_results = self._get(
                    '/domains/{0}/records'.format(self.domain_id))
                if rtype is not None:
                    query_results = [
                        item for item in query_results if item['rrset_type'] == rtype]
        except requests.exceptions.HTTPError as error:
            # 404 simply means "no such record set".
            if error.response.status_code == 404:
                query_results = []
            else:
                raise
        # convert records with multiple values into single-value records
        records = []
        for query_result in query_results:
            for value in query_result['rrset_values']:
                record = {
                    'type': query_result['rrset_type'],
                    'name': self._full_name(query_result['rrset_name']),
                    'ttl': query_result['rrset_ttl'],
                    'content': value,
                    'id': query_result['rrset_name'],
                }
                # cleanup potential quoting if suitable
                self._clean_TXT_record(record)
                records.append(record)
        # filter for content, if requested
        if content is not None:
            records = [
                record for record in records if record['content'] == content]
        LOGGER.debug('list_records: %s', records)
        return records

    # Update a record. Identifier must be specified.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Updates the specified record in a new Gandi zone
        'content' should be a string or a list of strings
        """
        if self.protocol == 'rpc':
            return self.rpc_helper.update_record(identifier, rtype, name, content)
        data = {}
        if rtype:
            data['rrset_type'] = rtype
        if name:
            data['rrset_name'] = self._relative_name(name)
        if content:
            if isinstance(content, (list, tuple, set)):
                data['rrset_values'] = list(content)
            else:
                data['rrset_values'] = [content]
        if rtype is not None:
            # replace the records of a specific rtype
            url = '/domains/{0}/records/{1}/{2}'.format(self.domain_id,
                                                        identifier or self._relative_name(
                                                            name),
                                                        rtype)
            self._put(url, data)
        else:
            # replace all records with a matching name
            url = '/domains/{0}/records/{1}'.format(self.domain_id,
                                                    identifier or self._relative_name(name))
            self._put(url, {'items': [data]})
        LOGGER.debug('update_record: %s', True)
        return True

    # Delete existings records.
    # If records do not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        if self.protocol == 'rpc':
            return self.rpc_helper.delete_record(identifier, rtype, name, content)
        if not identifier:
            remove_count = 0
            # get all matching (by rtype and name) records - ignore 'content' for now
            records = self._list_records(rtype=rtype, name=name)
            for current_type in set(record['type'] for record in records):
                matching_records = [
                    record for record in records if record['type'] == current_type]
                # collect all non-matching values
                if content is None:
                    remaining_values = []
                else:
                    remaining_values = [record['content'] for record in matching_records
                                        if record['content'] != content]
                url = '/domains/{0}/records/{1}/{2}'.format(
                    self.domain_id, self._relative_name(name), current_type)
                if len(matching_records) == len(remaining_values):
                    # no matching item should be removed for this rtype
                    pass
                elif remaining_values:
                    # reduce the list of values
                    self._put(url, {'rrset_values': remaining_values})
                    remove_count += 1
                else:
                    # remove the complete record (possibly with multiple values)
                    self._delete(url)
                    remove_count += 1
            if remove_count == 0:
                raise Exception('Record identifier could not be found.')
        else:
            self._delete(
                '/domains/{0}/records/{1}'.format(self.domain_id, identifier))
        # is always True at this point, if a non 200 response is returned an error is raised.
        LOGGER.debug('delete_record: %s', True)
        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        # Low-level LiveDNS HTTP helper; raises on any non-2xx response.
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'X-Api-Key': self._get_provider_option('auth_token')
        }
        if not url.startswith(self.api_endpoint):
            url = self.api_endpoint + url
        response = requests.request(action, url, params=query_params,
                                    data=json.dumps(data),
                                    headers=default_headers)
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        # DELETE responses have no JSON body.
        if action == 'DELETE':
            return ''
        return response.json()
class GandiRPCSubProvider(object): # pylint: disable=useless-object-inheritance
"""Provide Gandi RPCXML API implementation of Lexicon Provider interface.
This implementation is called through the main LiveDNS implementation
is RPC protocol is used.
"""
def __init__(self, api_key, api_endpoint, domain, relative_name_fn, full_name_fn): # pylint: disable=too-many-arguments
"""Initialize Gandi RCPXML API provider."""
super(GandiRPCSubProvider, self).__init__()
self._api_endpoint = api_endpoint
self._api_key = api_key
self._domain = domain
self._relative_name = relative_name_fn
self._full_name = full_name_fn
self._api = xmlrpclib.ServerProxy(self._api_endpoint, allow_none=True)
self._zone_id = None
# Authenticate against provider,
# Make any requests required to get the domain's id for this provider,
# so it can be used in subsequent calls. Should throw an error if
# authentication fails for any reason, or if the domain does not exist.
def authenticate(self):
"""Determine the current domain and zone IDs for the domain."""
try:
payload = self._api.domain.info(self._api_key, self._domain)
self._zone_id = payload['zone_id']
return payload['id']
except xmlrpclib.Fault as err:
raise Exception("Failed to authenticate: '{0}'".format(err))
# Create record. If record already exists with the same content, do nothing.
def create_record(self, rtype, name, content, ttl):
"""Creates a record for the domain in a new Gandi zone."""
version = None
ret = False
# This isn't quite "do nothing" if the record already exists.
# In this case, no new record will be created, but a new zone version
# will be created and set.
try:
version = self._api.domain.zone.version.new(
self._api_key, self._zone_id)
self._api.domain.zone.record.add(self._api_key, self._zone_id, version,
{'type': rtype.upper(),
'name': name,
'value': content,
'ttl': ttl
})
self._api.domain.zone.version.set(
self._api_key, self._zone_id, version)
ret = True
finally:
if not ret and version is not None:
self._api.domain.zone.version.delete(
self._api_key, self._zone_id, version)
LOGGER.debug("create_record: %s", ret)
return ret
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, rtype=None, name=None, content=None):
"""List all record for the domain in the active Gandi zone."""
opts = {}
if rtype is not None:
opts['type'] = rtype.upper()
if name is not None:
opts['name'] = self._relative_name(name)
if content is not None:
opts['value'] = self._txt_encode(content) if opts.get(
'type', '') == 'TXT' else content
records = []
payload = self._api.domain.zone.record.list(
self._api_key, self._zone_id, 0, opts)
for record in payload:
processed_record = {
'type': record['type'],
'name': self._full_name(record['name']),
'ttl': record['ttl'],
'content': record['value'],
'id': record['id']
}
# Gandi will add quotes to all TXT record strings
if processed_record['type'] == 'TXT':
processed_record['content'] = self._txt_decode(
processed_record['content'])
records.append(processed_record)
LOGGER.debug("list_records: %s", records)
return records
# Update a record. Identifier or type+name+content
def update_record(self, identifier, rtype=None, name=None, content=None): # pylint: disable=too-many-branches
"""Updates the specified record in a new Gandi zone."""
if not identifier:
records = self.list_records(rtype, name)
if len(records) == 1:
identifier = records[0]['id']
elif len(records) > 1:
raise Exception('Several record identifiers match the request')
else:
raise Exception('Record identifier could not be found')
identifier = str(identifier)
version = None
# Gandi doesn't allow you to edit records on the active zone file.
# Gandi also doesn't persist zone record identifiers when creating
# a new zone file. To update by identifier, we lookup the record
# by identifier, then use the record fields to find the record in
# the newly created zone.
records = self._api.domain.zone.record.list(
self._api_key, self._zone_id, 0, {'id': identifier})
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self._api.domain.zone.version.new(
self._api_key, self._zone_id)
records = self._api.domain.zone.record.list(
self._api_key, self._zone_id, version, rec)
if len(records) != 1:
raise self.GandiInternalError("expected one record")
if rtype is not None:
rec['type'] = rtype.upper()
if name is not None:
rec['name'] = self._relative_name(name)
if content is not None:
rec['value'] = self._txt_encode(
content) if rec['type'] == 'TXT' else content
records = self._api.domain.zone.record.update(
self._api_key, self._zone_id, version, {'id': records[0]['id']}, rec)
if len(records) != 1:
raise self.GandiInternalError(
"Expected one updated record")
self._api.domain.zone.version.set(
self._api_key, self._zone_id, version)
ret = True
except self.GandiInternalError:
pass
finally:
if not ret and version is not None:
self._api.domain.zone.version.delete(
self._api_key, self._zone_id, version)
LOGGER.debug("update_record: %s", ret)
return ret
# Delete existing records.
# If records do not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def delete_record(self, identifier=None, rtype=None, name=None, content=None):
"""Removes the specified records in a new Gandi zone."""
version = None
ret = False
opts = {}
if identifier is not None:
opts['id'] = identifier
else:
if not rtype and not name and not content:
raise ValueError(
'Error, at least one parameter from type, name or content must be set')
if rtype:
opts['type'] = rtype.upper()
if name:
opts['name'] = self._relative_name(name)
if content:
opts['value'] = self._txt_encode(
content) if opts['type'] == 'TXT' else content
records = self._api.domain.zone.record.list(
self._api_key, self._zone_id, 0, opts)
if records:
try:
version = self._api.domain.zone.version.new(
self._api_key, self._zone_id)
for record in records:
del record['id']
self._api.domain.zone.record.delete(
self._api_key, self._zone_id, version, record)
self._api.domain.zone.version.set(
self._api_key, self._zone_id, version)
ret = True
finally:
if not ret and version is not None:
self._api.domain.zone.version.delete(
self._api_key, self._zone_id, version)
LOGGER.debug("delete_record: %s", ret)
return ret
@staticmethod
def _txt_encode(val):
if not val:
return None
return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])
@staticmethod
def _txt_decode(val):
if not val:
return None
if len(val) > 1 and val[0:1] == '"':
val = val[1:-1].replace('" "', '').replace('\\"',
'"').replace('\\\\', '\\')
return val
    # This exception is for cleaner handling of internal errors
    # within the Gandi provider codebase
    class GandiInternalError(Exception):
        """Raised when a Gandi RPC call returns unexpected data; the
        caller rolls back the pending zone version and reports failure."""
<|code_end|>
|
DNS Made Easy needs retry logic, to recover from rate limiting errors
Originally certbot/certbot#7411.
DNS Made Easy has [restrictive rate limits](https://api-docs.dnsmadeeasy.com/?version=latest#f6f3c489-422d-4cf0-bccb-1933e6d655ac):
>To prevent unwanted flooding of the API system, there is a maximum number of requests that can be sent in a given time period. This limit is 150 requests per 5 minute scrolling window
Lexicon should be able to recover from hitting the rate limit without failing the operation.
The response received is an HTTP 400 with a response body of:
>{"error": ["Rate limit exceeded"]}
@adferrand suggested retry logic in [this comment](https://github.com/certbot/certbot/issues/7411#issuecomment-536438100):
>How about defining a short sleep (like 5 seconds), and a retry strategy with a max attempts, triggered in case of 400 with body {"error": ["Rate limit exceeded"]}?
| lexicon/providers/dnsmadeeasy.py
<|code_start|>"""Module provider for DNSMadeEasy"""
from __future__ import absolute_import
import hmac
import json
import logging
from builtins import bytes
from email.utils import formatdate
from hashlib import sha1
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['dnsmadeeasy']
def provider_parser(subparser):
    """Configure provider parser for DNSMadeEasy"""
    credential_arguments = (
        ("--auth-username", "specify username for authentication"),
        ("--auth-token", "specify token for authentication"),
    )
    for flag, description in credential_arguments:
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Provider class for DNSMadeEasy"""
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        # Allow overriding the endpoint (e.g. for a sandbox API).
        self.api_endpoint = self._get_provider_option(
            'api_endpoint') or 'https://api.dnsmadeeasy.com/V2.0'
    def _authenticate(self):
        """Look up the managed domain and store its numeric id."""
        try:
            payload = self._get('/dns/managed/name',
                                {'domainname': self.domain})
        except requests.exceptions.HTTPError as error:
            # 404 simply means the domain is not managed by this account.
            if error.response.status_code == 404:
                payload = {}
            else:
                raise
        if not payload or not payload['id']:
            raise Exception('No domain found')
        self.domain_id = payload['id']
    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        """Create a record; an HTTP 400 is tolerated as "already exists"."""
        record = {
            'type': rtype,
            'name': self._relative_name(name),
            'value': content,
            'ttl': self._get_lexicon_option('ttl')
        }
        payload = {}
        try:
            payload = self._post(
                '/dns/managed/{0}/records/'.format(self.domain_id), record)
        except requests.exceptions.HTTPError as error:
            if error.response.status_code != 400:
                raise
            # http 400 is ok here, because the record probably already exists
        LOGGER.debug('create_record: %s', 'name' in payload)
        return True
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List records, filtering type/name server-side and content locally."""
        filter_query = {}
        if rtype:
            filter_query['type'] = rtype
        if name:
            filter_query['recordName'] = self._relative_name(name)
        payload = self._get(
            '/dns/managed/{0}/records'.format(self.domain_id), filter_query)
        records = []
        for record in payload['data']:
            processed_record = {
                'type': record['type'],
                'name': '{0}.{1}'.format(record['name'], self.domain),
                'ttl': record['ttl'],
                'content': record['value'],
                'id': record['id']
            }
            processed_record = self._clean_TXT_record(processed_record)
            records.append(processed_record)
        if content:
            records = [
                record for record in records if record['content'].lower() == content.lower()]
        LOGGER.debug('list_records: %s', records)
        return records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update the identified record; unset arguments keep their value."""
        data = {
            'id': identifier,
            'ttl': self._get_lexicon_option('ttl')
        }
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['value'] = content
        if rtype:
            data['type'] = rtype
        self._put(
            '/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)
        LOGGER.debug('update_record: %s', True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete record(s) by identifier, or by matching type/name/content."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete(
                '/dns/managed/{0}/records/{1}'.format(self.domain_id, record_id))
        # is always True at this point, if a non 200 response is returned an error is raised.
        LOGGER.debug('delete_record: %s', True)
        return True
    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Send a signed request to the DNSMadeEasy API and return its payload."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'x-dnsme-apiKey': self._get_provider_option('auth_username')
        }
        default_auth = None
        # Date string in HTTP format e.g. Sat, 12 Feb 2011 20:59:04 GMT
        request_date = formatdate(usegmt=True)
        # Requests are authenticated with an HMAC-SHA1 of the request date.
        hashed = hmac.new(bytes(self._get_provider_option('auth_token'), 'ascii'),
                          bytes(request_date, 'ascii'), sha1)
        default_headers['x-dnsme-requestDate'] = request_date
        default_headers['x-dnsme-hmac'] = hashed.hexdigest()
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    headers=default_headers,
                                    auth=default_auth)
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        # PUT and DELETE actions don't return valid json.
        if action in ['DELETE', 'PUT']:
            return response.text
        return response.json()
<|code_end|>
| lexicon/providers/dnsmadeeasy.py
<|code_start|>"""Module provider for DNSMadeEasy"""
from __future__ import absolute_import
import hmac
import json
import logging
from builtins import bytes
from email.utils import formatdate
from hashlib import sha1
from urllib3.util.retry import Retry
import requests
from requests.adapters import HTTPAdapter
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['dnsmadeeasy']
class _RetryRateLimit(Retry):
    # Standard urllib3 Retry objects trigger retries only based on HTTP status code or HTTP method.
    # However we need to differentiate 400 errors with body `{"error": ["Rate limit exceeded"]}`
    # from the other 400 errors. The internal _RetryRateLimit class does that.
    def increment(self, method=None, url=None, response=None,
                  error=None, _pool=None, _stacktrace=None):
        """Allow a retry only for 400 responses whose JSON body reports a
        rate limit; any other 400 aborts immediately with RuntimeError.

        Robustness fix: a 400 whose body is not valid JSON (or not a JSON
        object) previously crashed here with a decode/attribute error; it
        is now treated as a non-rate-limit response.
        """
        if response:
            try:
                body = json.loads(response.data)
                rate_limited = 'Rate limit exceeded' in body.get('error', [])
            except (ValueError, AttributeError):
                rate_limited = False
            if rate_limited:
                return super(_RetryRateLimit, self).increment(
                    method, url, response, error, _pool, _stacktrace)
        raise RuntimeError('URL {0} returned a HTTP 400 status code.'.format(url))
def provider_parser(subparser):
    """Configure provider parser for DNSMadeEasy"""
    credential_arguments = (
        ("--auth-username", "specify username for authentication"),
        ("--auth-token", "specify token for authentication"),
    )
    for flag, description in credential_arguments:
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Provider class for DNSMadeEasy"""
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        # Allow overriding the endpoint (e.g. for a sandbox API).
        self.api_endpoint = self._get_provider_option(
            'api_endpoint') or 'https://api.dnsmadeeasy.com/V2.0'
    def _authenticate(self):
        """Look up the managed domain and store its numeric id."""
        try:
            payload = self._get('/dns/managed/name',
                                {'domainname': self.domain})
        except requests.exceptions.HTTPError as error:
            # 404 simply means the domain is not managed by this account.
            if error.response.status_code == 404:
                payload = {}
            else:
                raise
        if not payload or not payload['id']:
            raise Exception('No domain found')
        self.domain_id = payload['id']
    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        """Create a record; an HTTP 400 is tolerated as "already exists"."""
        record = {
            'type': rtype,
            'name': self._relative_name(name),
            'value': content,
            'ttl': self._get_lexicon_option('ttl')
        }
        payload = {}
        try:
            payload = self._post(
                '/dns/managed/{0}/records/'.format(self.domain_id), record)
        except requests.exceptions.HTTPError as error:
            if error.response.status_code != 400:
                raise
            # http 400 is ok here, because the record probably already exists
        LOGGER.debug('create_record: %s', 'name' in payload)
        return True
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List records, filtering type/name server-side and content locally."""
        filter_query = {}
        if rtype:
            filter_query['type'] = rtype
        if name:
            filter_query['recordName'] = self._relative_name(name)
        payload = self._get(
            '/dns/managed/{0}/records'.format(self.domain_id), filter_query)
        records = []
        for record in payload['data']:
            processed_record = {
                'type': record['type'],
                'name': '{0}.{1}'.format(record['name'], self.domain),
                'ttl': record['ttl'],
                'content': record['value'],
                'id': record['id']
            }
            processed_record = self._clean_TXT_record(processed_record)
            records.append(processed_record)
        if content:
            records = [
                record for record in records if record['content'].lower() == content.lower()]
        LOGGER.debug('list_records: %s', records)
        return records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update the identified record; unset arguments keep their value."""
        data = {
            'id': identifier,
            'ttl': self._get_lexicon_option('ttl')
        }
        if name:
            data['name'] = self._relative_name(name)
        if content:
            data['value'] = content
        if rtype:
            data['type'] = rtype
        self._put(
            '/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)
        LOGGER.debug('update_record: %s', True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete record(s) by identifier, or by matching type/name/content."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete(
                '/dns/managed/{0}/records/{1}'.format(self.domain_id, record_id))
        # is always True at this point, if a non 200 response is returned an error is raised.
        LOGGER.debug('delete_record: %s', True)
        return True
    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Send a signed request, transparently retrying rate-limited 400s."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        default_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'x-dnsme-apiKey': self._get_provider_option('auth_username')
        }
        default_auth = None
        # Date string in HTTP format e.g. Sat, 12 Feb 2011 20:59:04 GMT
        request_date = formatdate(usegmt=True)
        # Requests are authenticated with an HMAC-SHA1 of the request date.
        hashed = hmac.new(bytes(self._get_provider_option('auth_token'), 'ascii'),
                          bytes(request_date, 'ascii'), sha1)
        default_headers['x-dnsme-requestDate'] = request_date
        default_headers['x-dnsme-hmac'] = hashed.hexdigest()
        session = requests.Session()
        try:
            # DNSMadeEasy allows only 150 requests in a floating 5 min time window.
            # So we implement a retry strategy on requests returned as 400 with body
            # `{"error": ["Rate limit exceeded"]}`.
            # 10 retries with backoff = 0.6 gives following retry delays after first attempt:
            # 1.2s, 2.4s, 4.8s, 9.6s, 19.2s, 38.4s, 76.8s, 153.6s, 307.2s
            # So last attempt is done 5 min 7 seconds after first try, so the
            # size of the floating window.
            # Beyond it we can assume something else is wrong and so give up.
            session_retries = _RetryRateLimit(total=10, backoff_factor=0.6, status_forcelist=[400])
            session_adapter = HTTPAdapter(max_retries=session_retries)
            session.mount('http://', session_adapter)
            session.mount('https://', session_adapter)
            response = session.request(action, self.api_endpoint + url, params=query_params,
                                       data=json.dumps(data),
                                       headers=default_headers,
                                       auth=default_auth)
            # if the request fails for any reason, throw an error.
            response.raise_for_status()
            # PUT and DELETE actions don't return valid json.
            if action in ['DELETE', 'PUT']:
                return response.text
            return response.json()
        finally:
            session.close()
<|code_end|>
|
LuaDNS _list_records fails due to missing "id" in API response
I'm using certbot with the luadns plugin which in turn uses lexicon.
I've been experiencing the error reported here: https://community.letsencrypt.org/t/luadns-renew-error/89634
```
2019-08-29 06:32:43,517:DEBUG:urllib3.connectionpool:https://api.luadns.com:443 "GET /v1/zones/[XXXXXX]/records HTTP/1.1" 200 None
2019-08-29 06:32:43,520:ERROR:certbot.error_handler:Encountered exception during recovery:
Traceback (most recent call last):
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/error_handler.py", line 124, in _call_registered
self.funcs[-1]()
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/auth_handler.py", line 220, in _cleanup_challenges
self.auth.cleanup(achalls)
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/plugins/dns_common.py", line 77, in cleanup
self._cleanup(domain, validation_domain_name, validation)
File "/usr/local/lib/python2.7/dist-packages/certbot_dns_luadns/dns_luadns.py", line 55, in _cleanup
self._get_luadns_client().del_txt_record(domain, validation_name, validation)
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/plugins/dns_common_lexicon.py", line 65, in del_txt_record
self.provider.delete_record(type='TXT', name=record_name, content=record_content)
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/base.py", line 118, in delete_record
return self._delete_record(identifier=identifier, rtype=rtype, name=name, content=content)
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/luadns.py", line 111, in _delete_record
records = self._list_records(rtype, name, content)
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/luadns.py", line 71, in _list_records
'id': record['id']
KeyError: 'id'
```
I've replaced the zone id with [XXXXXX].
Code line that errors: https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/luadns.py#L71
Running the call directly towards the LuaDNS api shows that my REDIRECT record does not return an "id" -> no wonder it fails.
```
...
[
{
"id": XXXXXXXXX,
"name": "sub.some.domain.",
"type": "A",
"content": "111.111.111.111",
"ttl": 300,
"zone_id": XXXXX,
"generated": false,
"created_at": "2019-08-27T05:59:34.219211Z",
"updated_at": "2019-08-27T05:59:34.219213Z"
},
{
"name": "some.domain.",
"type": "REDIRECT",
"content": "1 https://sub.some.domain",
"ttl": 300,
"zone_id": XXXXX,
"generated": false,
"created_at": "2019-08-29T07:05:09.757404499Z",
"updated_at": "2019-08-29T07:05:09.757406053Z"
}
]
...
```
My question is: How do I go forward with this? (for now, I've just deleted the REDIRECT record and everything works fine)
1) Send PR to lexicon to ignore records that does not have id? ( if 'id' in record )
2) Reach out to LuaDNS for API fix?
Any input would be great :)
| lexicon/providers/luadns.py
<|code_start|>"""Module provider for luadns"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['luadns.com']
def provider_parser(subparser):
    """Configure provider parser for luadns"""
    credential_arguments = (
        ("--auth-username", "specify email address for authentication"),
        ("--auth-token", "specify token for authentication"),
    )
    for flag, description in credential_arguments:
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Provider class for luadns"""
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = 'https://api.luadns.com/v1'
    def _authenticate(self):
        """Resolve ``self.domain`` to its LuaDNS zone id."""
        payload = self._get('/zones')
        domain_info = next(
            (domain for domain in payload if domain['name'] == self.domain), None)
        if not domain_info:
            raise Exception('No domain found')
        self.domain_id = domain_info['id']
    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        """Create a record unless an identical one already exists."""
        # check if record already exists
        existing_records = self._list_records(rtype, name, content)
        if len(existing_records) == 1:
            return True
        self._post('/zones/{0}/records'.format(self.domain_id),
                   {'type': rtype,
                    'name': self._fqdn_name(name),
                    'content': content,
                    'ttl': self._get_lexicon_option('ttl')})
        LOGGER.debug('create_record: %s', True)
        return True
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List zone records, optionally filtered by type, name and content.

        Bug fix: LuaDNS returns some records (e.g. REDIRECT) without an
        ``id`` field, which used to raise ``KeyError: 'id'`` here.  Such
        records cannot be addressed through this API anyway, so they are
        skipped.
        """
        payload = self._get('/zones/{0}/records'.format(self.domain_id))
        records = []
        for record in payload:
            if 'id' not in record:
                continue
            processed_record = {
                'type': record['type'],
                'name': self._full_name(record['name']),
                'ttl': record['ttl'],
                'content': record['content'],
                'id': record['id']
            }
            records.append(processed_record)
        if rtype:
            records = [record for record in records if record['type'] == rtype]
        if name:
            records = [record for record in records if record['name']
                       == self._full_name(name)]
        if content:
            records = [
                record for record in records if record['content'] == content]
        LOGGER.debug('list_records: %s', records)
        return records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update the identified record; unset arguments are left untouched."""
        data = {
            'ttl': self._get_lexicon_option('ttl')
        }
        if rtype:
            data['type'] = rtype
        if name:
            data['name'] = self._fqdn_name(name)
        if content:
            data['content'] = content
        self._put(
            '/zones/{0}/records/{1}'.format(self.domain_id, identifier), data)
        LOGGER.debug('update_record: %s', True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete record(s) by identifier or by matching type/name/content."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete(
                '/zones/{0}/records/{1}'.format(self.domain_id, record_id))
        LOGGER.debug('delete_record: %s', True)
        return True
    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Perform an authenticated JSON request against the LuaDNS API."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    auth=requests.auth.HTTPBasicAuth(self._get_provider_option(
                                        'auth_username'), self._get_provider_option('auth_token')),
                                    headers={
                                        'Content-Type': 'application/json',
                                        'Accept': 'application/json'
                                    })
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()
<|code_end|>
| lexicon/providers/luadns.py
<|code_start|>"""Module provider for luadns"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['luadns.com']
def provider_parser(subparser):
    """Configure provider parser for luadns"""
    credential_arguments = (
        ("--auth-username", "specify email address for authentication"),
        ("--auth-token", "specify token for authentication"),
    )
    for flag, description in credential_arguments:
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Provider class for luadns"""
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = 'https://api.luadns.com/v1'
    def _authenticate(self):
        """Resolve ``self.domain`` to its LuaDNS zone id."""
        payload = self._get('/zones')
        domain_info = next(
            (domain for domain in payload if domain['name'] == self.domain), None)
        if not domain_info:
            raise Exception('No domain found')
        self.domain_id = domain_info['id']
    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        """Create a record unless an identical one already exists."""
        # check if record already exists
        existing_records = self._list_records(rtype, name, content)
        if len(existing_records) == 1:
            return True
        self._post('/zones/{0}/records'.format(self.domain_id),
                   {'type': rtype,
                    'name': self._fqdn_name(name),
                    'content': content,
                    'ttl': self._get_lexicon_option('ttl')})
        LOGGER.debug('create_record: %s', True)
        return True
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List zone records, optionally filtered by type, name and content."""
        payload = self._get('/zones/{0}/records'.format(self.domain_id))
        records = []
        for record in payload:
            # Records without an id (e.g. REDIRECT) cannot be managed via
            # the API, so they are skipped instead of raising KeyError.
            if 'id' in record:
                processed_record = {
                    'id': record['id'],
                    'type': record['type'],
                    'name': self._full_name(record['name']),
                    'ttl': record['ttl'],
                    'content': record['content']
                }
                records.append(processed_record)
        if rtype:
            records = [record for record in records if record['type'] == rtype]
        if name:
            records = [record for record in records if record['name']
                       == self._full_name(name)]
        if content:
            records = [
                record for record in records if record['content'] == content]
        LOGGER.debug('list_records: %s', records)
        return records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update the identified record; unset arguments are left untouched."""
        data = {
            'ttl': self._get_lexicon_option('ttl')
        }
        if rtype:
            data['type'] = rtype
        if name:
            data['name'] = self._fqdn_name(name)
        if content:
            data['content'] = content
        self._put(
            '/zones/{0}/records/{1}'.format(self.domain_id, identifier), data)
        LOGGER.debug('update_record: %s', True)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete record(s) by identifier or by matching type/name/content."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete(
                '/zones/{0}/records/{1}'.format(self.domain_id, record_id))
        LOGGER.debug('delete_record: %s', True)
        return True
    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Perform an authenticated JSON request against the LuaDNS API."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    auth=requests.auth.HTTPBasicAuth(self._get_provider_option(
                                        'auth_username'), self._get_provider_option('auth_token')),
                                    headers={
                                        'Content-Type': 'application/json',
                                        'Accept': 'application/json'
                                    })
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()
<|code_end|>
|
Easyname provider is broken
Currently, easyname support is not working as the easyname website changed. There is a pull request with a working fix though: #449
I am in the process of getting things to work so it can be merged, but in the meantime you can just apply the two line changes so you can use lexicon with easyname again.
| lexicon/providers/easyname.py
<|code_start|>"""Module provider for Easyname DNS"""
from __future__ import absolute_import, print_function
import logging
from requests import Response, Session
# Handling optional dependency for this provider
try:
from bs4 import BeautifulSoup, Tag
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['easyname.eu']
def provider_parser(subparser):
    """Configure provider parser for Easyname DNS"""
    subparser.description = """A provider for Easyname DNS."""
    credential_flags = {
        '--auth-username': 'Specify username used to authenticate',
        '--auth-password': 'Specify password used to authenticate',
    }
    for flag, help_text in credential_flags.items():
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """
    easyname provider
    """
    # Easyname exposes no public API; the provider scrapes these pages of
    # the customer web interface instead.
    URLS = {
        'login': 'https://my.easyname.com/en/login',
        'domain_list': 'https://my.easyname.com/domains',
        'overview': 'https://my.easyname.com/hosting/view-user.php',
        'dns': 'https://my.easyname.com/domains/settings/dns.php?domain={}',
        'dns_create_entry': 'https://my.easyname.com/domains/settings/form.php?domain={}',
        'dns_delete_entry':
            'https://my.easyname.com/domains/settings/delete_record.php?domain={}&confirm=1&id={}'
    }
    def __init__(self, config):
        super(Provider, self).__init__(config)
        # One session so login cookies persist across scraped pages.
        self.session = Session()
        self.domain_id = None
        # Cache of parsed DNS table rows; invalidated after create/delete.
        self._records = None
    def _authenticate(self):
        """
        Authenticates against Easyname website and try to find out the domain
        id.
        Easyname uses a CSRF token in its login form, so two requests are
        necessary to actually login.
        Returns:
            bool: True if domain id was found.
        Raises:
            AssertionError: When a request returns unexpected or unknown data.
            ValueError: When login data is wrong or the domain does not exist.
        """
        csrf_token = self._get_csrf_token()
        self._login(csrf_token)
        domain_text_element = self._get_domain_text_of_authoritative_zone()
        self.domain_id = self._get_domain_id(domain_text_element)
        LOGGER.debug('Easyname domain ID: %s', self.domain_id)
        return True
    def _create_record(self, rtype, name, content):
        # Thin wrapper; the shared implementation also supports overwrites.
        return self._create_record_internal(rtype=rtype, name=name, content=content)
    def _create_record_internal(self, rtype, name, content, identifier=None):
        """
        Create a new DNS entry in the domain zone if it does not already exist.
        Args:
            rtype (str): The DNS type (e.g. A, TXT, MX, etc) of the new entry.
            name (str): The name of the new DNS entry, e.g the domain for which a
                MX entry shall be valid.
            content (str): The content of the new DNS entry, e.g. the mail server
                hostname for a MX entry.
            [identifier] (str): The easyname id of a DNS entry. Use to overwrite an
                existing entry.
        Returns:
            bool: True if the record was created successfully, False otherwise.
        """
        name = self._relative_name(name) if name is not None else name
        LOGGER.debug('Creating record with name %s', name)
        if self._is_duplicate_record(rtype, name, content):
            return True
        data = self._get_post_data_to_create_dns_entry(rtype, name, content, identifier)
        LOGGER.debug('Create DNS data: %s', data)
        create_response = self.session.post(
            self.URLS['dns_create_entry'].format(self.domain_id),
            data=data
        )
        # Drop the cached record list so the success check below re-reads it.
        self._invalidate_records_cache()
        self._log('Create DNS entry', create_response)
        # Pull a list of records and check for ours
        was_success = len(self._list_records(rtype, name, content)) > 0
        if was_success:
            msg = 'Successfully added record %s'
        else:
            msg = 'Failed to add record %s'
        LOGGER.info(msg, name)
        return was_success
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """
        Delete one or more DNS entries in the domain zone that match the given
        criteria.
        Args:
            [identifier] (str): An ID to match against DNS entry easyname IDs.
            [rtype] (str): A DNS rtype (e.g. A, TXT, MX, etc) to match against DNS
                entry types.
            [name] (str): A name to match against DNS entry names.
            [content] (str): A content to match against a DNS entry contents.
        Returns:
            bool: True if the record(s) were deleted successfully, False
                otherwise.
        """
        success_url = self.URLS['dns'].format(self.domain_id)
        record_ids = self._get_matching_dns_entry_ids(identifier, rtype,
                                                      name, content)
        LOGGER.debug('Record IDs to delete: %s', record_ids)
        success = True
        for rec_id in record_ids:
            delete_response = self.session.get(
                self.URLS['dns_delete_entry'].format(self.domain_id, rec_id))
            self._invalidate_records_cache()
            self._log('Delete DNS entry {}'.format(rec_id), delete_response)
            # A successful delete redirects back to the DNS overview page.
            success = success and delete_response.url == success_url
        return success
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """
        Update a DNS entry identified by identifier or name in the domain zone.
        Any non given argument will leave the current value of the DNS entry.
        Args:
            identifier (str): The easyname id of the DNS entry to update.
            [rtype] (str): The DNS rtype (e.g. A, TXT, MX, etc) of the new entry.
            [name] (str): The name of the new DNS entry, e.g the domain for which
                a MX entry shall be valid.
            [content] (str): The content of the new DNS entry, e.g. the mail
                server hostname for a MX entry.
        Returns:
            bool: True if the record was updated successfully, False otherwise.
        Raises:
            AssertionError: When a request returns unexpected or unknown data.
        """
        if identifier is not None:
            identifier = int(identifier)
            records = self._list_records_internal(identifier=identifier)
        else:
            records = self._list_records_internal(name=name, rtype=rtype)
        LOGGER.debug('Records to update (%d): %s', len(records), records)
        # NOTE(review): `assert` is stripped under `python -O`; the documented
        # contract raises AssertionError, so left as-is — TODO confirm.
        assert records, 'No record found to update'
        success = True
        for record in records:
            # Fall back to the existing value for any argument not given.
            name = name if name is not None else record['name']
            rtype = rtype if rtype is not None else record['type']
            content = content if content is not None \
                else record['content']
            success = success and self._create_record_internal(
                rtype, name, content, record['id'])
        return success
def _list_records(self, rtype=None, name=None, content=None):
return self._list_records_internal(rtype=rtype, name=name, content=content)
    def _list_records_internal(self, rtype=None, name=None, content=None, identifier=None):
        """
        Filter and list DNS entries of domain zone on Easyname.
        Easyname shows each entry in a HTML table row and each attribute on a
        table column.
        Args:
            [rtype] (str): Filter by DNS rtype (e.g. A, TXT, MX, etc)
            [name] (str): Filter by the name of the DNS entry, e.g the domain for
                which a MX entry shall be valid.
            [content] (str): Filter by the content of the DNS entry, e.g. the
                mail server hostname for a MX entry.
            [identifier] (str): Filter by the easyname id of the DNS entry.
        Returns:
            list: A list of DNS entries. A DNS entry is an object with DNS
                attribute names as keys (e.g. name, content, priority, etc)
                and additionally an id.
        Raises:
            AssertionError: When a request returns unexpected or unknown data.
        """
        name = self._full_name(name) if name is not None else name
        # Scrape and parse the DNS table only once per cache lifetime;
        # _invalidate_records_cache() resets self._records to None.
        if self._records is None:
            records = []
            rows = self._get_dns_entry_trs()
            for index, row in enumerate(rows):
                self._log('DNS list entry', row)
                try:
                    rec = {}
                    if row.has_attr('ondblclick'):
                        # The row's ondblclick handler embeds the easyname
                        # record id as "...id=<number>'...".
                        rec['id'] = int(row['ondblclick'].split(
                            'id=')[1].split("'")[0])
                    else:
                        # Rows without an id get a synthetic, unique negative
                        # id derived from their table position.
                        rec['id'] = -index
                    # Column order on the page: name, type, content,
                    # priority, ttl.
                    columns = row.find_all('td')
                    rec['name'] = (columns[0].string or '').strip()
                    rec['type'] = (columns[1].contents[1] or '').strip()
                    rec['content'] = (columns[2].string or '').strip()
                    rec['priority'] = (columns[3].string or '').strip()
                    rec['ttl'] = (columns[4].string or '').strip()
                    if rec['priority']:
                        rec['priority'] = int(rec['priority'])
                    if rec['ttl']:
                        rec['ttl'] = int(rec['ttl'])
                except Exception as error:
                    errmsg = 'Cannot parse DNS entry ({}).'.format(error)
                    LOGGER.warning(errmsg)
                    raise AssertionError(errmsg)
                records.append(rec)
            self._records = records
        records = self._filter_records(self._records, rtype, name, content, identifier)
        LOGGER.debug('Final records (%d): %s', len(records), records)
        return records
    def _request(self, action='GET', url='/', data=None, query_params=None):
        # Required by the BaseProvider interface but unused here: this
        # provider performs all HTTP traffic through self.session directly.
        pass
    def _invalidate_records_cache(self):
        """
        Invalidate DNS entries cache such that list_records will do a new
        request to retrieve DNS entries.

        Called after mutating operations so subsequent listings re-scrape the
        DNS page.
        """
        self._records = None
    def _get_post_data_to_create_dns_entry(self, rtype, name, content, identifier=None):
        """
        Build and return the post data that is needed to create a DNS entry.

        When *identifier* is given, the data describes an update of that
        existing record, re-using its current priority and TTL.
        """
        is_update = identifier is not None
        record = None
        if is_update:
            records = self._list_records_internal(identifier=identifier)
            assert len(records) == 1, 'ID is not unique or does not exist'
            record = records[0]
            LOGGER.debug('Create post data to update record: %s', record)
        data = {
            # An empty id makes the form create a brand new entry.
            'id': str(identifier) if is_update else '',
            'action': 'save',
            'name': name,
            'type': rtype,
            'content': content,
            # Defaults for new entries: priority 10, TTL 360.
            'prio': str(record['priority']) if is_update else '10',
            'ttl': str(record['ttl']) if is_update else '360',
            'commit': ''
        }
        ttl = self._get_lexicon_option('ttl')
        # NOTE(review): a user-supplied TTL <= 360 is silently ignored here —
        # presumably 360 is the provider's minimum; confirm before changing.
        if ttl and ttl > 360:
            data['ttl'] = str(ttl)
        prio = self._get_lexicon_option('priority')
        if prio and prio > 0:
            data['prio'] = str(prio)
        return data
def _is_duplicate_record(self, rtype, name, content):
"""Check if DNS entry already exists."""
records = self._list_records(rtype, name, content)
is_duplicate = len(records) >= 1
if is_duplicate:
LOGGER.info('Duplicate record %s %s %s, NOOP', rtype, name, content)
return is_duplicate
def _get_matching_dns_entry_ids(self, identifier=None, rtype=None,
name=None, content=None):
"""Return a list of DNS entries that match the given criteria."""
record_ids = []
if not identifier:
records = self._list_records(rtype, name, content)
record_ids = [record['id'] for record in records]
else:
record_ids.append(identifier)
return record_ids
    def _get_dns_entry_trs(self):
        """
        Return the TR elements holding the DNS entries.

        Raises:
            AssertionError: When the DNS page cannot be loaded or contains no
                recognizable entry table or rows.
        """
        dns_list_response = self.session.get(
            self.URLS['dns'].format(self.domain_id))
        self._log('DNS list', dns_list_response)
        assert dns_list_response.status_code == 200, \
            'Could not load DNS entries.'
        html = BeautifulSoup(dns_list_response.content, 'html.parser')
        self._log('DNS list', html)
        dns_table = html.find('table', {'id': 'cp_domains_dnseintraege'})
        assert dns_table is not None, 'Could not find DNS entry table'
        def _is_zone_tr(elm):
            # Entry rows carry either a class attribute or an ondblclick
            # handler; other rows have neither.
            has_ondblclick = elm.has_attr('ondblclick')
            has_class = elm.has_attr('class')
            return elm.name.lower() == 'tr' and (has_class or has_ondblclick)
        rows = dns_table.findAll(_is_zone_tr)
        assert rows is not None and rows, 'Could not find any DNS entries'
        return rows
def _filter_records(self, records, rtype=None, name=None, content=None, identifier=None): # pylint: disable=too-many-arguments,no-self-use
"""
Filter dns entries based on type, name or content.
"""
if not records:
return []
if identifier is not None:
LOGGER.debug('Filtering %d records by id: %s', len(records), identifier)
records = [record for record in records if record['id'] == identifier]
if rtype is not None:
LOGGER.debug('Filtering %d records by type: %s', len(records), rtype)
records = [record for record in records if record['type'] == rtype]
if name is not None:
LOGGER.debug('Filtering %d records by name: %s', len(records), name)
if name.endswith('.'):
name = name[:-1]
records = [record for record in records if name == record['name']]
if content is not None:
LOGGER.debug('Filtering %d records by content: %s', len(records), content.lower())
records = [record for record in records if
record['content'].lower() == content.lower()]
return records
    def _get_csrf_token(self):
        """Return the CSRF Token of easyname login form.

        The token is read from the hidden 'loginxtoken' input on the login
        page and must accompany the subsequent login POST.
        """
        home_response = self.session.get(self.URLS['login'])
        self._log('Home', home_response)
        assert home_response.status_code == 200, \
            'Could not load Easyname login page.'
        html = BeautifulSoup(home_response.content, 'html.parser')
        self._log('Home', html)
        csrf_token_field = html.find('input', {'id': 'loginxtoken'})
        assert csrf_token_field is not None, 'Could not find login token.'
        return csrf_token_field['value']
    def _login(self, csrf_token):
        """Attempt to login session on easyname.

        Success is detected via the redirect target URL: this version expects
        a successful login to land on the 'overview' page.
        NOTE(review): a sibling revision of this provider asserts against the
        'domain_list' URL instead — confirm which redirect Easyname currently
        uses.
        """
        login_response = self.session.post(
            self.URLS['login'],
            data={
                'username': self._get_provider_option('auth_username') or '',
                'password': self._get_provider_option('auth_password') or '',
                'submit': '',
                'loginxtoken': csrf_token,
            }
        )
        self._log('Login', login_response)
        assert login_response.status_code == 200, \
            'Could not login due to a network error.'
        assert login_response.url == self.URLS['overview'], \
            'Easyname login failed, bad EASYNAME_USER or EASYNAME_PASS.'
    def _get_domain_text_of_authoritative_zone(self):
        """Get the authoritative name zone.

        Walks up the subdomain chain of self.domain until a matching zone is
        found in the Easyname domain table; self.domain is rewritten to that
        zone's domain as a side effect.
        Raises:
            AssertionError: When the domain list cannot be loaded or no zone
                matches the configured domain.
        """
        # We are logged in, so get the domain list
        zones_response = self.session.get(self.URLS['domain_list'])
        self._log('Zone', zones_response)
        assert zones_response.status_code == 200, \
            'Could not retrieve domain list due to a network error.'
        html = BeautifulSoup(zones_response.content, 'html.parser')
        self._log('Zone', html)
        domain_table = html.find('table', {'id': 'cp_domain_table'})
        assert domain_table is not None, 'Could not find domain table'
        # (Sub)domains can either be managed in their own zones or by the
        # zones of their parent (sub)domains. Iterate over all subdomains
        # (starting with the deepest one) and see if there is an own zone
        # for it.
        domain = self.domain or ''
        domain_text = None
        subdomains = domain.split('.')
        while True:
            domain = '.'.join(subdomains)
            LOGGER.debug('Check if %s has own zone', domain)
            domain_text = domain_table.find(string=domain)
            # Stop once a zone matches, or once only 'name.tld' remains.
            if domain_text is not None or len(subdomains) < 3:
                break
            subdomains.pop(0)
        # Update domain to equal the zone's domain. This is important if we are
        # handling a subdomain that has no zone of itself. If we do not do
        # this, self._relative_name will strip also a part of the subdomain
        # away.
        self.domain = domain
        assert domain_text is not None, \
            'The domain does not exist on Easyname.'
        return domain_text
def _get_domain_id(self, domain_text_element): # pylint: disable=no-self-use
"""Return the easyname id of the domain."""
try:
# Hierarchy: TR > TD > SPAN > Domain Text
tr_anchor = domain_text_element.parent.parent.parent
td_anchor = tr_anchor.find('td', {'class': 'td_2'})
link = td_anchor.find('a')['href']
domain_id = link.rsplit('/', 1)[-1]
return domain_id
except Exception as error:
errmsg = ('Cannot get the domain id even though the domain seems '
'to exist (%s).', error)
LOGGER.warning(errmsg)
raise AssertionError(errmsg)
def _log(self, name, element): # pylint: disable=no-self-use
"""
Log Response and Tag elements. Do nothing if elements is none of them.
"""
if isinstance(element, Response):
LOGGER.debug('%s response: URL=%s Code=%s', name, element.url, element.status_code)
elif isinstance(element, (BeautifulSoup, Tag)):
LOGGER.debug('%s HTML:\n%s', name, element)
<|code_end|>
| lexicon/providers/easyname.py
<|code_start|>"""Module provider for Easyname DNS"""
from __future__ import absolute_import, print_function
import logging
from requests import Response, Session
# Handling optional dependency for this provider
try:
from bs4 import BeautifulSoup, Tag
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
# Module-level logger, and the nameserver domains Lexicon's auto-discovery
# matches against for this provider.
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['easyname.eu']
def provider_parser(subparser):
    """Attach the Easyname-specific CLI options to *subparser*."""
    subparser.description = """A provider for Easyname DNS."""
    for flag, help_text in (
            ('--auth-username', 'Specify username used to authenticate'),
            ('--auth-password', 'Specify password used to authenticate'),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """
    easyname provider

    Easyname exposes no public DNS API in this implementation: the provider
    drives the customer web interface with a requests Session, posting HTML
    forms and parsing the resulting pages with BeautifulSoup.
    """
    # Endpoints of the Easyname customer web UI. '{}' placeholders receive the
    # easyname-internal domain id (and, for deletion, the record id).
    URLS = {
        'login': 'https://my.easyname.com/en/login',
        'domain_list': 'https://my.easyname.com/domains/',
        'overview': 'https://my.easyname.com/hosting/view-user.php',
        'dns': 'https://my.easyname.com/domains/settings/dns.php?domain={}',
        'dns_create_entry': 'https://my.easyname.com/domains/settings/form.php?domain={}',
        'dns_delete_entry':
            'https://my.easyname.com/domains/settings/delete_record.php?domain={}&confirm=1&id={}'
    }
    def __init__(self, config):
        super(Provider, self).__init__(config)
        # One Session for the whole provider so the login cookie is reused.
        self.session = Session()
        # Easyname-internal id of the managed domain; set by _authenticate().
        self.domain_id = None
        # Cache of parsed DNS entries; None means "not fetched yet".
        self._records = None
    def _authenticate(self):
        """
        Authenticates against Easyname website and try to find out the domain
        id.
        Easyname uses a CSRF token in its login form, so two requests are
        necessary to actually login.
        Returns:
            bool: True if domain id was found.
        Raises:
            AssertionError: When a request returns unexpected or unknown data.
            ValueError: When login data is wrong or the domain does not exist.
        """
        csrf_token = self._get_csrf_token()
        self._login(csrf_token)
        domain_text_element = self._get_domain_text_of_authoritative_zone()
        self.domain_id = self._get_domain_id(domain_text_element)
        LOGGER.debug('Easyname domain ID: %s', self.domain_id)
        return True
    def _create_record(self, rtype, name, content):
        # Public BaseProvider entry point; delegates to the internal variant
        # that can also overwrite an existing entry.
        return self._create_record_internal(rtype=rtype, name=name, content=content)
    def _create_record_internal(self, rtype, name, content, identifier=None):
        """
        Create a new DNS entry in the domain zone if it does not already exist.
        Args:
            rtype (str): The DNS type (e.g. A, TXT, MX, etc) of the new entry.
            name (str): The name of the new DNS entry, e.g the domain for which a
                MX entry shall be valid.
            content (str): The content of the new DNS entry, e.g. the mail server
                hostname for a MX entry.
            [identifier] (str): The easyname id of a DNS entry. Use to overwrite an
                existing entry.
        Returns:
            bool: True if the record was created successfully, False otherwise.
        """
        name = self._relative_name(name) if name is not None else name
        LOGGER.debug('Creating record with name %s', name)
        if self._is_duplicate_record(rtype, name, content):
            return True
        data = self._get_post_data_to_create_dns_entry(rtype, name, content, identifier)
        LOGGER.debug('Create DNS data: %s', data)
        create_response = self.session.post(
            self.URLS['dns_create_entry'].format(self.domain_id),
            data=data
        )
        self._invalidate_records_cache()
        self._log('Create DNS entry', create_response)
        # Pull a list of records and check for ours
        was_success = len(self._list_records(rtype, name, content)) > 0
        if was_success:
            msg = 'Successfully added record %s'
        else:
            msg = 'Failed to add record %s'
        LOGGER.info(msg, name)
        return was_success
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """
        Delete one or more DNS entries in the domain zone that match the given
        criteria.
        Args:
            [identifier] (str): An ID to match against DNS entry easyname IDs.
            [rtype] (str): A DNS rtype (e.g. A, TXT, MX, etc) to match against DNS
                entry types.
            [name] (str): A name to match against DNS entry names.
            [content] (str): A content to match against a DNS entry contents.
        Returns:
            bool: True if the record(s) were deleted successfully, False
                otherwise.
        """
        success_url = self.URLS['dns'].format(self.domain_id)
        record_ids = self._get_matching_dns_entry_ids(identifier, rtype,
                                                      name, content)
        LOGGER.debug('Record IDs to delete: %s', record_ids)
        success = True
        for rec_id in record_ids:
            delete_response = self.session.get(
                self.URLS['dns_delete_entry'].format(self.domain_id, rec_id))
            self._invalidate_records_cache()
            self._log('Delete DNS entry {}'.format(rec_id), delete_response)
            # A successful deletion redirects back to the DNS listing page.
            success = success and delete_response.url == success_url
        return success
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """
        Update a DNS entry identified by identifier or name in the domain zone.
        Any non given argument will leave the current value of the DNS entry.
        Args:
            identifier (str): The easyname id of the DNS entry to update.
            [rtype] (str): The DNS rtype (e.g. A, TXT, MX, etc) of the new entry.
            [name] (str): The name of the new DNS entry, e.g the domain for which
                a MX entry shall be valid.
            [content] (str): The content of the new DNS entry, e.g. the mail
                server hostname for a MX entry.
        Returns:
            bool: True if the record was updated successfully, False otherwise.
        Raises:
            AssertionError: When a request returns unexpected or unknown data.
        """
        if identifier is not None:
            # Record ids are parsed from the HTML as ints; normalize a
            # CLI-supplied string identifier.
            identifier = int(identifier)
            records = self._list_records_internal(identifier=identifier)
        else:
            records = self._list_records_internal(name=name, rtype=rtype)
        LOGGER.debug('Records to update (%d): %s', len(records), records)
        assert records, 'No record found to update'
        success = True
        for record in records:
            # Any argument the caller left out falls back to the record's
            # current value; the id makes the create call overwrite in place.
            name = name if name is not None else record['name']
            rtype = rtype if rtype is not None else record['type']
            content = content if content is not None \
                else record['content']
            success = success and self._create_record_internal(
                rtype, name, content, record['id'])
        return success
    def _list_records(self, rtype=None, name=None, content=None):
        # Public BaseProvider entry point.
        return self._list_records_internal(rtype=rtype, name=name, content=content)
    def _list_records_internal(self, rtype=None, name=None, content=None, identifier=None):
        """
        Filter and list DNS entries of domain zone on Easyname.
        Easyname shows each entry in a HTML table row and each attribute on a
        table column.
        Args:
            [rtype] (str): Filter by DNS rtype (e.g. A, TXT, MX, etc)
            [name] (str): Filter by the name of the DNS entry, e.g the domain for
                which a MX entry shall be valid.
            [content] (str): Filter by the content of the DNS entry, e.g. the
                mail server hostname for a MX entry.
            [identifier] (str): Filter by the easyname id of the DNS entry.
        Returns:
            list: A list of DNS entries. A DNS entry is an object with DNS
                attribute names as keys (e.g. name, content, priority, etc)
                and additionally an id.
        Raises:
            AssertionError: When a request returns unexpected or unknown data.
        """
        name = self._full_name(name) if name is not None else name
        # Scrape and parse the DNS table only once per cache lifetime;
        # _invalidate_records_cache() resets self._records to None.
        if self._records is None:
            records = []
            rows = self._get_dns_entry_trs()
            for index, row in enumerate(rows):
                self._log('DNS list entry', row)
                try:
                    rec = {}
                    if row.has_attr('ondblclick'):
                        # The row's ondblclick handler embeds the easyname
                        # record id as "...id=<number>'...".
                        rec['id'] = int(row['ondblclick'].split(
                            'id=')[1].split("'")[0])
                    else:
                        # Rows without an id get a synthetic, unique negative
                        # id derived from their table position.
                        rec['id'] = -index
                    # Column order on the page: name, type, content,
                    # priority, ttl.
                    columns = row.find_all('td')
                    rec['name'] = (columns[0].string or '').strip()
                    rec['type'] = (columns[1].contents[1] or '').strip()
                    rec['content'] = (columns[2].string or '').strip()
                    rec['priority'] = (columns[3].string or '').strip()
                    rec['ttl'] = (columns[4].string or '').strip()
                    if rec['priority']:
                        rec['priority'] = int(rec['priority'])
                    if rec['ttl']:
                        rec['ttl'] = int(rec['ttl'])
                except Exception as error:
                    errmsg = 'Cannot parse DNS entry ({}).'.format(error)
                    LOGGER.warning(errmsg)
                    raise AssertionError(errmsg)
                records.append(rec)
            self._records = records
        records = self._filter_records(self._records, rtype, name, content, identifier)
        LOGGER.debug('Final records (%d): %s', len(records), records)
        return records
    def _request(self, action='GET', url='/', data=None, query_params=None):
        # Required by the BaseProvider interface but unused here: this
        # provider performs all HTTP traffic through self.session directly.
        pass
    def _invalidate_records_cache(self):
        """
        Invalidate DNS entries cache such that list_records will do a new
        request to retrieve DNS entries.
        """
        self._records = None
    def _get_post_data_to_create_dns_entry(self, rtype, name, content, identifier=None):
        """
        Build and return the post data that is needed to create a DNS entry.

        When *identifier* is given, the data describes an update of that
        existing record, re-using its current priority and TTL.
        """
        is_update = identifier is not None
        record = None
        if is_update:
            records = self._list_records_internal(identifier=identifier)
            assert len(records) == 1, 'ID is not unique or does not exist'
            record = records[0]
            LOGGER.debug('Create post data to update record: %s', record)
        data = {
            # An empty id makes the form create a brand new entry.
            'id': str(identifier) if is_update else '',
            'action': 'save',
            'name': name,
            'type': rtype,
            'content': content,
            # Defaults for new entries: priority 10, TTL 360.
            'prio': str(record['priority']) if is_update else '10',
            'ttl': str(record['ttl']) if is_update else '360',
            'commit': ''
        }
        ttl = self._get_lexicon_option('ttl')
        # NOTE(review): a user-supplied TTL <= 360 is silently ignored here —
        # presumably 360 is the provider's minimum; confirm before changing.
        if ttl and ttl > 360:
            data['ttl'] = str(ttl)
        prio = self._get_lexicon_option('priority')
        if prio and prio > 0:
            data['prio'] = str(prio)
        return data
    def _is_duplicate_record(self, rtype, name, content):
        """Check if DNS entry already exists."""
        records = self._list_records(rtype, name, content)
        is_duplicate = len(records) >= 1
        if is_duplicate:
            LOGGER.info('Duplicate record %s %s %s, NOOP', rtype, name, content)
        return is_duplicate
    def _get_matching_dns_entry_ids(self, identifier=None, rtype=None,
                                    name=None, content=None):
        """Return a list of DNS entries that match the given criteria."""
        record_ids = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            record_ids = [record['id'] for record in records]
        else:
            record_ids.append(identifier)
        return record_ids
    def _get_dns_entry_trs(self):
        """
        Return the TR elements holding the DNS entries.
        """
        dns_list_response = self.session.get(
            self.URLS['dns'].format(self.domain_id))
        self._log('DNS list', dns_list_response)
        assert dns_list_response.status_code == 200, \
            'Could not load DNS entries.'
        html = BeautifulSoup(dns_list_response.content, 'html.parser')
        self._log('DNS list', html)
        dns_table = html.find('table', {'id': 'cp_domains_dnseintraege'})
        assert dns_table is not None, 'Could not find DNS entry table'
        def _is_zone_tr(elm):
            # Entry rows carry either a class attribute or an ondblclick
            # handler; other rows have neither.
            has_ondblclick = elm.has_attr('ondblclick')
            has_class = elm.has_attr('class')
            return elm.name.lower() == 'tr' and (has_class or has_ondblclick)
        rows = dns_table.findAll(_is_zone_tr)
        assert rows is not None and rows, 'Could not find any DNS entries'
        return rows
    def _filter_records(self, records, rtype=None, name=None, content=None, identifier=None):  # pylint: disable=too-many-arguments,no-self-use
        """
        Filter dns entries based on type, name or content.
        """
        if not records:
            return []
        if identifier is not None:
            LOGGER.debug('Filtering %d records by id: %s', len(records), identifier)
            records = [record for record in records if record['id'] == identifier]
        if rtype is not None:
            LOGGER.debug('Filtering %d records by type: %s', len(records), rtype)
            records = [record for record in records if record['type'] == rtype]
        if name is not None:
            LOGGER.debug('Filtering %d records by name: %s', len(records), name)
            # Normalize a trailing dot before comparing names.
            if name.endswith('.'):
                name = name[:-1]
            records = [record for record in records if name == record['name']]
        if content is not None:
            LOGGER.debug('Filtering %d records by content: %s', len(records), content.lower())
            records = [record for record in records if
                       record['content'].lower() == content.lower()]
        return records
    def _get_csrf_token(self):
        """Return the CSRF Token of easyname login form."""
        home_response = self.session.get(self.URLS['login'])
        self._log('Home', home_response)
        assert home_response.status_code == 200, \
            'Could not load Easyname login page.'
        html = BeautifulSoup(home_response.content, 'html.parser')
        self._log('Home', html)
        # The token is a hidden input on the login page.
        csrf_token_field = html.find('input', {'id': 'loginxtoken'})
        assert csrf_token_field is not None, 'Could not find login token.'
        return csrf_token_field['value']
    def _login(self, csrf_token):
        """Attempt to login session on easyname.

        Success is detected via the redirect target: a successful login lands
        on the domain list page.
        """
        login_response = self.session.post(
            self.URLS['login'],
            data={
                'username': self._get_provider_option('auth_username') or '',
                'password': self._get_provider_option('auth_password') or '',
                'submit': '',
                'loginxtoken': csrf_token,
            }
        )
        self._log('Login', login_response)
        assert login_response.status_code == 200, \
            'Could not login due to a network error.'
        assert login_response.url == self.URLS['domain_list'], \
            'Easyname login failed, bad EASYNAME_USER or EASYNAME_PASS.%s' % login_response.url
    def _get_domain_text_of_authoritative_zone(self):
        """Get the authoritative name zone.

        Walks up the subdomain chain of self.domain until a matching zone is
        found in the Easyname domain table; self.domain is rewritten to that
        zone's domain as a side effect.
        """
        # We are logged in, so get the domain list
        zones_response = self.session.get(self.URLS['domain_list'])
        self._log('Zone', zones_response)
        assert zones_response.status_code == 200, \
            'Could not retrieve domain list due to a network error.'
        html = BeautifulSoup(zones_response.content, 'html.parser')
        self._log('Zone', html)
        domain_table = html.find('table', {'id': 'cp_domain_table'})
        assert domain_table is not None, 'Could not find domain table'
        # (Sub)domains can either be managed in their own zones or by the
        # zones of their parent (sub)domains. Iterate over all subdomains
        # (starting with the deepest one) and see if there is an own zone
        # for it.
        domain = self.domain or ''
        domain_text = None
        subdomains = domain.split('.')
        while True:
            domain = '.'.join(subdomains)
            LOGGER.debug('Check if %s has own zone', domain)
            domain_text = domain_table.find(string=domain)
            # Stop once a zone matches, or once only 'name.tld' remains.
            if domain_text is not None or len(subdomains) < 3:
                break
            subdomains.pop(0)
        # Update domain to equal the zone's domain. This is important if we are
        # handling a subdomain that has no zone of itself. If we do not do
        # this, self._relative_name will strip also a part of the subdomain
        # away.
        self.domain = domain
        assert domain_text is not None, \
            'The domain does not exist on Easyname.'
        return domain_text
    def _get_domain_id(self, domain_text_element):  # pylint: disable=no-self-use
        """Return the easyname id of the domain."""
        try:
            # Hierarchy: TR > TD > SPAN > Domain Text
            tr_anchor = domain_text_element.parent.parent.parent
            td_anchor = tr_anchor.find('td', {'class': 'td_2'})
            link = td_anchor.find('a')['href']
            domain_id = link.rsplit('/', 1)[-1]
            return domain_id
        except Exception as error:
            # NOTE(review): this builds a ('msg %s', error) tuple, not a
            # formatted string, so the tuple is logged and raised verbatim.
            errmsg = ('Cannot get the domain id even though the domain seems '
                      'to exist (%s).', error)
            LOGGER.warning(errmsg)
            raise AssertionError(errmsg)
    def _log(self, name, element):  # pylint: disable=no-self-use
        """
        Log Response and Tag elements. Do nothing if elements is none of them.
        """
        if isinstance(element, Response):
            LOGGER.debug('%s response: URL=%s Code=%s', name, element.url, element.status_code)
        elif isinstance(element, (BeautifulSoup, Tag)):
            LOGGER.debug('%s HTML:\n%s', name, element)
<|code_end|>
|
TLDExtract Private Domains for Dynamic DNS Providers
Hello all,
I'm currently putting together the plugin for dynu.com (listed in NYI proiders).
As Dynu also acts as Dynamic DNS provider with several toplevel domains as base for the dynamic domain (`yourhost.dynu.net`, `yourhost.freeddns.org`..., also, wildcards), I had some trouble putting together the plugin.
As an example, I'm making up `mydomain.dynu.net` as my target dynamic dns hostname.
Now, the `tldextract` package used to determine the part of the domain that belongs to the toplevel will spit out `net` as the suffix, `dynu` as the registered domain, and then drop `mydomain` in further processing as seen [in client.py](../blob/master/lexicon/client.py#L43).
In turn, finding the right domain from the list of dns entries in `_authenticate` is not possible by default (as `self.domain` is set to `dynu.net`).
I discovered two workarounds for this:
1. use `--delegated "mydomain.dynu.net"` to explicitly target the subdomain
2. change the code [in client.py](../blob/master/lexicon/client.py#L41) to this:
```python
extract = tldextract.TLDExtract(include_psl_private_domains=True)
# Process domain, strip subdomain
domain_parts = extract(
self.config.resolve('lexicon:domain'))
runtime_config['domain'] = '{0}.{1}'.format(
domain_parts.domain, domain_parts.suffix)
```
The latter is taken from [the tldextract README](https://github.com/john-kurkowski/tldextract#public-vs-private-domains).
And because Dynu probably isn't the only Dynamic DNS provider using subdomains for their users, I guess this should be the default solution.
There's a catch however that is still in ongoing development [tldextract#144](https://github.com/john-kurkowski/tldextract/pull/144):
The list of TLDs is cached on first load of the extension, so if the config is not set to `include_psl_private_domains` before the package is first initialized, it won't work. So either an update has to be triggered manually, or, lexicon should be installed and used from a virtualenv in the first place.
Since I'm already making use of method 2 in my dev environment, I could open a PR right away, but I'm not 100% sure on side effects for other plugins, hence my hesitation.
Thanks and best,
Chris
edit// whitespace in codeblock, typos, grammar
| lexicon/client.py
<|code_start|>"""Main module of Lexicon. Defines the Client class, that holds all Lexicon logic."""
from __future__ import absolute_import
import importlib
import tldextract
from lexicon import discovery
from lexicon.config import (
ConfigResolver, DictConfigSource,
legacy_config_resolver, non_interactive_config_resolver,
)
class ProviderNotAvailableError(Exception):
    """
    Custom exception to raise when a provider is not available,
    typically because some optional dependencies are missing.
    """
class Client(object):  # pylint: disable=useless-object-inheritance,too-few-public-methods
    """This is the Lexicon client, that will execute all the logic."""

    def __init__(self, config=None):
        """Build a client from *config* (a ConfigResolver, a legacy dict, or None).

        Raises:
            AttributeError: when a mandatory configuration entry is missing.
            ProviderNotAvailableError: when the requested provider is unknown
                or its optional dependencies are not installed.
        """
        if not config:
            # If there is not config specified, we load a non-interactive configuration.
            self.config = non_interactive_config_resolver()
        elif not isinstance(config, ConfigResolver):
            # If config is not a ConfigResolver, we are in a legacy situation.
            # We protect this part of the Client API.
            self.config = legacy_config_resolver(config)
        else:
            self.config = config
        # Validate configuration
        self._validate_config()
        runtime_config = {}
        # Process domain, strip subdomain.
        # include_psl_private_domains makes tldextract honor the "private
        # domains" section of the Public Suffix List, so dynamic-DNS style
        # names (e.g. mydomain.dynu.net) keep their own zone instead of
        # collapsing to the hoster's base domain (dynu.net).
        domain_extractor = tldextract.TLDExtract(include_psl_private_domains=True)
        domain_parts = domain_extractor(
            self.config.resolve('lexicon:domain'))
        runtime_config['domain'] = '{0}.{1}'.format(
            domain_parts.domain, domain_parts.suffix)
        if self.config.resolve('lexicon:delegated'):
            # handle delegated domain
            delegated = self.config.resolve('lexicon:delegated').rstrip('.')
            if delegated != runtime_config.get('domain'):
                # convert to relative name
                if delegated.endswith(runtime_config.get('domain')):
                    delegated = delegated[:-len(runtime_config.get('domain'))]
                    delegated = delegated.rstrip('.')
                # update domain
                runtime_config['domain'] = '{0}.{1}'.format(
                    delegated, runtime_config.get('domain'))
        self.action = self.config.resolve('lexicon:action')
        self.provider_name = (self.config.resolve('lexicon:provider_name')
                              or self.config.resolve('lexicon:provider'))
        # Highest-priority config source so the computed domain wins.
        self.config.add_config_source(DictConfigSource(runtime_config), 0)
        provider_module = importlib.import_module(
            'lexicon.providers.' + self.provider_name)
        provider_class = getattr(provider_module, 'Provider')
        self.provider = provider_class(self.config)

    def execute(self):
        """Execute provided configuration in class constructor to the DNS records"""
        self.provider.authenticate()
        identifier = self.config.resolve('lexicon:identifier')
        record_type = self.config.resolve('lexicon:type')
        name = self.config.resolve('lexicon:name')
        content = self.config.resolve('lexicon:content')
        if self.action == 'create':
            return self.provider.create_record(record_type, name, content)
        if self.action == 'list':
            return self.provider.list_records(record_type, name, content)
        if self.action == 'update':
            return self.provider.update_record(identifier, record_type, name, content)
        if self.action == 'delete':
            return self.provider.delete_record(identifier, record_type, name, content)
        raise ValueError('Invalid action statement: {0}'.format(self.action))

    def _validate_config(self):
        """Raise AttributeError/ProviderNotAvailableError for missing or unusable settings."""
        provider_name = self.config.resolve('lexicon:provider_name')
        if not self.config.resolve('lexicon:provider_name'):
            raise AttributeError('provider_name')
        try:
            available = discovery.find_providers()[self.config.resolve('lexicon:provider_name')]
        except KeyError:
            raise ProviderNotAvailableError('This provider ({0}) is not supported by Lexicon.'
                                            .format(provider_name))
        else:
            if not available:
                raise ProviderNotAvailableError(
                    'This provider ({0}) has required dependencies that are missing. '
                    'Please install lexicon[{0}] first.'.format(provider_name))
        if not self.config.resolve('lexicon:action'):
            raise AttributeError('action')
        if not self.config.resolve('lexicon:domain'):
            raise AttributeError('domain')
        if not self.config.resolve('lexicon:type'):
            raise AttributeError('type')
<|code_end|>
| lexicon/client.py
<|code_start|>"""Main module of Lexicon. Defines the Client class, that holds all Lexicon logic."""
from __future__ import absolute_import
import importlib
import os
import tldextract
from lexicon import discovery
from lexicon.config import (
ConfigResolver, DictConfigSource,
legacy_config_resolver, non_interactive_config_resolver,
)
# Location of the tldextract suffix-list cache, overridable through the
# LEXICON_TLDEXTRACT_CACHE environment variable. A dedicated cache file keeps
# the private-domains-enabled suffix list separate from other tldextract users.
TLDEXTRACT_CACHE_FILE_DEFAULT = os.path.join('~', '.lexicon_tld_set')
TLDEXTRACT_CACHE_FILE = os.path.expanduser(os.environ.get("LEXICON_TLDEXTRACT_CACHE",
                                                          TLDEXTRACT_CACHE_FILE_DEFAULT))
class ProviderNotAvailableError(Exception):
    """
    Custom exception to raise when a provider is not available,
    typically because some optional dependencies are missing.
    """
class Client(object):  # pylint: disable=useless-object-inheritance,too-few-public-methods
    """This is the Lexicon client, that will execute all the logic."""
    def __init__(self, config=None):
        # Accept a ready ConfigResolver, a legacy dict, or nothing at all.
        if not config:
            # If there is not config specified, we load a non-interactive configuration.
            self.config = non_interactive_config_resolver()
        elif not isinstance(config, ConfigResolver):
            # If config is not a ConfigResolver, we are in a legacy situation.
            # We protect this part of the Client API.
            self.config = legacy_config_resolver(config)
        else:
            self.config = config
        # Validate configuration
        self._validate_config()
        runtime_config = {}
        # Process domain, strip subdomain
        # include_psl_private_domains makes tldextract honor the "private
        # domains" section of the Public Suffix List, so dynamic-DNS style
        # names (e.g. mydomain.dynu.net) keep their own zone instead of
        # collapsing to the hoster's base domain (dynu.net).
        domain_extractor = tldextract.TLDExtract(cache_file=TLDEXTRACT_CACHE_FILE,
                                                 include_psl_private_domains=True)
        domain_parts = domain_extractor(
            self.config.resolve('lexicon:domain'))
        runtime_config['domain'] = '{0}.{1}'.format(
            domain_parts.domain, domain_parts.suffix)
        if self.config.resolve('lexicon:delegated'):
            # handle delegated domain
            delegated = self.config.resolve('lexicon:delegated').rstrip('.')
            if delegated != runtime_config.get('domain'):
                # convert to relative name
                if delegated.endswith(runtime_config.get('domain')):
                    delegated = delegated[:-len(runtime_config.get('domain'))]
                    delegated = delegated.rstrip('.')
                # update domain
                runtime_config['domain'] = '{0}.{1}'.format(
                    delegated, runtime_config.get('domain'))
        self.action = self.config.resolve('lexicon:action')
        self.provider_name = (self.config.resolve('lexicon:provider_name')
                              or self.config.resolve('lexicon:provider'))
        # Highest-priority config source so the computed domain wins.
        self.config.add_config_source(DictConfigSource(runtime_config), 0)
        provider_module = importlib.import_module(
            'lexicon.providers.' + self.provider_name)
        provider_class = getattr(provider_module, 'Provider')
        self.provider = provider_class(self.config)
    def execute(self):
        """Execute provided configuration in class constructor to the DNS records"""
        self.provider.authenticate()
        identifier = self.config.resolve('lexicon:identifier')
        record_type = self.config.resolve('lexicon:type')
        name = self.config.resolve('lexicon:name')
        content = self.config.resolve('lexicon:content')
        if self.action == 'create':
            return self.provider.create_record(record_type, name, content)
        if self.action == 'list':
            return self.provider.list_records(record_type, name, content)
        if self.action == 'update':
            return self.provider.update_record(identifier, record_type, name, content)
        if self.action == 'delete':
            return self.provider.delete_record(identifier, record_type, name, content)
        raise ValueError('Invalid action statement: {0}'.format(self.action))
    def _validate_config(self):
        """Raise AttributeError/ProviderNotAvailableError for missing or unusable settings."""
        provider_name = self.config.resolve('lexicon:provider_name')
        if not self.config.resolve('lexicon:provider_name'):
            raise AttributeError('provider_name')
        try:
            available = discovery.find_providers()[self.config.resolve('lexicon:provider_name')]
        except KeyError:
            raise ProviderNotAvailableError('This provider ({0}) is not supported by Lexicon.'
                                            .format(provider_name))
        else:
            if not available:
                raise ProviderNotAvailableError(
                    'This provider ({0}) has required dependencies that are missing. '
                    'Please install lexicon[{0}] first.'.format(provider_name))
        if not self.config.resolve('lexicon:action'):
            raise AttributeError('action')
        if not self.config.resolve('lexicon:domain'):
            raise AttributeError('domain')
        if not self.config.resolve('lexicon:type'):
            raise AttributeError('type')
<|code_end|>
|
GoDaddy provider should recognize domaincontrol.com as its nameserver
For the auto provider, it should recognize that domains managed by GoDaddy often have nameservers under the *.domaincontrol.com namespace. You can verify this is GoDaddy via whois; and I also tested this by adding 'domaincontrol.com' to the recognized nameservers with the following patch.
```
--- providers/godaddy.py.orig 2020-01-09 08:58:26.160360574 +0000
+++ providers/godaddy.py 2020-01-10 19:27:29.292030195 +0000
@@ -14,5 +14,5 @@
LOGGER = logging.getLogger(__name__)
-NAMESERVER_DOMAINS = ['godaddy.com']
+NAMESERVER_DOMAINS = ['godaddy.com','domaincontrol.com']
```
And the current whois excerpt:
```
$ whois domaincontrol.com
Domain Name: DOMAINCONTROL.COM
...
Updated Date: 2018-08-07T19:25:37Z
...
Registrant Organization: Go Daddy Operating Company, LLC
Registrant State/Province: Arizona
Registrant Country: US
```
| lexicon/providers/godaddy.py
<|code_start|>"""Module provider for Godaddy"""
from __future__ import absolute_import
import hashlib
import json
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
# Nameserver namespaces that identify a GoDaddy-managed zone for the 'auto'
# provider. Besides godaddy.com itself, GoDaddy commonly delegates customer
# zones to *.domaincontrol.com (registered to Go Daddy Operating Company, LLC
# per whois), so both must be listed for detection to work.
NAMESERVER_DOMAINS = ['godaddy.com', 'domaincontrol.com']
def provider_parser(subparser):
    """Attach the GoDaddy-specific CLI options to the given subparser."""
    options = (
        ('--auth-key', 'specify the key to access the API'),
        ('--auth-secret', 'specify the secret to access the API'),
    )
    for flag, description in options:
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """
    Implements the DNS GoDaddy provider.
    Some general remarks about this provider, because it uses a weirdly designed API.
    Indeed, there is no direct way to insert, update or delete a specific record.
    Furthermore, there is no unique identifier for a record.
    Instead GoDaddy uses a replace approach: for a given set of records one
    can replace this set with a new set sent through the API.
    For the sake of simplicity and consistency across the provider edit methods,
    the set will always be all records in the DNS zone.
    With this approach:
    - adding a record consists in appending a record to the obtained set and calling
    replace with the updated set,
    - updating a record consists in modifying a record in the obtained set and calling
    replace with the updated set,
    - deleting a record consists in removing a record from the obtained set and calling
    replace with the updated set.
    In parallel, as said before, there is no unique identifier.
    This provider then implements a pseudo-identifier, to allow an easy update or delete
    using the '--identifier' lexicon parameter.
    But you need to call the 'list' command just before executing an update/delete action,
    because the identifier value is tied to the content of the record, and will change anytime
    something is changed in the record.
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Filled in by _authenticate() with the GoDaddy-side zone identifier.
        self.domain_id = None
        self.api_endpoint = 'https://api.godaddy.com/v1'
    def _authenticate(self):
        # Fetching the domain both validates the credentials and yields the
        # GoDaddy identifier for the zone.
        domain = self.domain
        result = self._get('/domains/{0}'.format(domain))
        self.domain_id = result['domainId']
    def _list_records(self, rtype=None, name=None, content=None):
        # List zone records; rtype/name filtering is delegated to the API
        # through the URL path, content filtering is done client-side.
        domain = self.domain
        url = '/domains/{0}/records'.format(domain)
        if rtype:
            url += '/{0}'.format(rtype)
        if name:
            url += '/{0}'.format(self._relative_name(name))
        raws = self._get(url)
        records = []
        for raw in raws:
            records.append({
                'id': Provider._identifier(raw),
                'type': raw['type'],
                'name': self._full_name(raw['name']),
                'ttl': raw['ttl'],
                'content': raw['data']
            })
        if content:
            # NOTE(review): the dicts built just above store the value under
            # the 'content' key, so this record['data'] lookup looks like it
            # would raise KeyError whenever a content filter is supplied —
            # confirm, and probably compare record['content'] instead.
            records = [
                record for record in records if record['data'] == content]
        LOGGER.debug('list_records: %s', records)
        return records
    def _create_record(self, rtype, name, content):
        # Insert a record by re-sending the entire zone with the new entry
        # appended (see class docstring for the replace-based API).
        domain = self.domain
        relative_name = self._relative_name(name)
        ttl = self._get_lexicon_option('ttl')
        # Retrieve existing data in DNS zone.
        records = self._get('/domains/{0}/records'.format(domain))
        # Check if a record already matches given parameters
        for record in records:
            if (record['type'] == rtype and self._relative_name(record['name']) == relative_name
                    and record['data'] == content):
                LOGGER.debug(
                    'create_record (ignored, duplicate): %s %s %s', rtype, name, content)
                return True
        # Append a new entry corresponding to given parameters.
        data = {'type': rtype, 'name': relative_name, 'data': content}
        if ttl:
            data['ttl'] = ttl
        records.append(data)
        # Synchronize data with inserted record into DNS zone.
        self._put('/domains/{0}/records'.format(domain), records)
        LOGGER.debug('create_record: %s %s %s', rtype, name, content)
        return True
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        # No real identifier is exposed by GoDaddy. We can rely either:
        # - only on rtype/name to get the relevant records, both of them are
        #   required, or we could update too many records ...,
        # - or on the pseudo-identifier provided.
        # Furthermore for the rtype/name approach, we cannot update all matching
        # records, as it would lead to an error (two entries of same rtype + name
        # cannot have the same content). So for the rtype/name approach, we search
        # the first matching record for rtype/name on which content is different,
        # and we update it before synchronizing the DNS zone.
        if not identifier and not rtype:
            raise Exception('ERROR: rtype is required')
        if not identifier and not name:
            raise Exception('ERROR: name is required')
        domain = self.domain
        relative_name = None
        if name:
            relative_name = self._relative_name(name)
        # Retrieve existing data in DNS zone.
        records = self._get('/domains/{0}/records'.format(domain))
        # Get the record to update:
        # - either explicitly by its identifier,
        # - or the first matching by its rtype+name where content does not match
        #   (first match, see first method comment for explanation).
        for record in records:
            if ((identifier and Provider._identifier(record) == identifier) or  # pylint: disable=too-many-boolean-expressions
                    (not identifier and record['type'] == rtype
                     and self._relative_name(record['name']) == relative_name
                     and record['data'] != content)):
                record['data'] = content
                break
        # Synchronize data with updated records into DNS zone.
        self._put('/domains/{0}/records'.format(domain), records)
        LOGGER.debug('update_record: %s %s %s', rtype, name, content)
        return True
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        # For the LOL. GoDaddy does not accept an empty array
        # when updating a particular set of records.
        # It means that you cannot request to remove all records
        # matching a particular rtype and/or name.
        # Instead, we get ALL records in the DNS zone, update the set,
        # and replace EVERYTHING in the DNS zone.
        # You will always have at minimal NS/SRV entries in the array,
        # otherwise your DNS zone is broken, and updating the zone is the least of your problem ...
        domain = self.domain
        # Retrieve all records in the DNS zone
        records = self._get('/domains/{0}/records'.format(domain))
        relative_name = None
        if name:
            relative_name = self._relative_name(name)
        # Filter out all records which match the pattern (either identifier
        # or some combination of rtype/name/content). The big boolean below
        # enumerates every combination of provided criteria; a record is KEPT
        # when it fails at least one provided criterion (and everything is
        # kept when no criterion at all is provided).
        filtered_records = []
        if identifier:
            filtered_records = [
                record for record in records if Provider._identifier(record) != identifier]
        else:
            for record in records:
                if ((not rtype and not relative_name and not content)  # pylint: disable=too-many-boolean-expressions
                        or (rtype and not relative_name and not content and record['type'] != rtype)
                        or (not rtype and relative_name and not content
                            and self._relative_name(record['name']) != relative_name)
                        or (not rtype and not relative_name and content
                            and record['data'] != content)
                        or (rtype and relative_name and not content
                            and (record['type'] != rtype
                                 or self._relative_name(record['name']) != relative_name))
                        or (rtype and not relative_name and content
                            and (record['type'] != rtype or record['data'] != content))
                        or (not rtype and relative_name and content
                            and (self._relative_name(record['name']) != relative_name
                                 or record['data'] != content))
                        or (rtype and relative_name and content
                            and (record['type'] != rtype
                                 or self._relative_name(record['name']) != relative_name
                                 or record['data'] != content))):
                    filtered_records.append(record)
        # Synchronize data with expurged entries into DNS zone.
        self._put('/domains/{0}/records'.format(domain), filtered_records)
        LOGGER.debug('delete_records: %s %s %s', rtype, name, content)
        return True
    # GoDaddy provides no identifier for a record, which is a problem
    # where identifiers can be used (delete and update).
    # To circumvent this, we implement a pseudo-identifier, which is basically
    # a hash of type+name+content of a given record.
    # It is far from perfect, as the identifier will change each time
    # we change something in the record ...
    # But at least, one can use 'lexicon godaddy list ...' then
    # 'lexicon godaddy update --identifier ...' to modify a specific record.
    # However, 'lexicon godaddy list ...' should be called each time the DNS
    # zone has been changed to calculate the new identifiers.
    @staticmethod
    def _identifier(record):
        # 7-hex-char truncated SHA-256 over the type, name and data fields.
        sha256 = hashlib.sha256()
        sha256.update(('type=' + record.get('type', '') + ',').encode('utf-8'))
        sha256.update(('name=' + record.get('name', '') + ',').encode('utf-8'))
        sha256.update(('data=' + record.get('data', '') + ',').encode('utf-8'))
        return sha256.hexdigest()[0:7]
    def _request(self, action='GET', url='/', data=None, query_params=None):
        # Single entry point for all HTTP calls to the GoDaddy API.
        if not data:
            data = {}
        if not query_params:
            query_params = {}
        # When editing the DNS zone, the API is unavailable for a few seconds
        # (until modifications are propagated). In this case, a call to the API
        # will return a 409 HTTP error. We use the Retry extension to retry the
        # requests until we get a processable response (any status that is not
        # a 409).
        retries = Retry(
            total=10,
            backoff_factor=0.5,
            status_forcelist=[409],
            method_whitelist=frozenset(
                ['GET', 'PUT', 'POST', 'DELETE', 'PATCH'])
        )
        session = requests.Session()
        session.mount('https://', HTTPAdapter(max_retries=retries))
        result = session.request(action, self.api_endpoint + url,
                                 params=query_params,
                                 data=json.dumps(data),
                                 headers={
                                     'Content-Type': 'application/json',
                                     'Accept': 'application/json',
                                     # GoDaddy use a key/secret pair to authenticate
                                     'Authorization': 'sso-key {0}:{1}'.format(
                                         self._get_provider_option(
                                             'auth_key'),
                                         self._get_provider_option('auth_secret'))
                                 })
        result.raise_for_status()
        try:
            # Return the JSON body response if exists.
            return result.json()
        except ValueError:
            # For some requests command (eg. PUT), GoDaddy will not
            # return any JSON, just an HTTP status without body.
            return None
<|code_end|>
| lexicon/providers/godaddy.py
<|code_start|>"""Module provider for Godaddy"""
from __future__ import absolute_import
import hashlib
import json
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
# Nameserver namespaces used by the 'auto' provider to detect GoDaddy-managed
# zones: domaincontrol.com is also operated by Go Daddy Operating Company, LLC
# (per whois), and GoDaddy commonly delegates customer zones to it.
NAMESERVER_DOMAINS = ['godaddy.com', 'domaincontrol.com']
def provider_parser(subparser):
    """Register the GoDaddy authentication arguments on the given parser."""
    subparser.add_argument('--auth-key',
                           help='specify the key to access the API')
    subparser.add_argument('--auth-secret',
                           help='specify the secret to access the API')
class Provider(BaseProvider):
    """
    Implements the DNS GoDaddy provider.
    Some general remarks about this provider, because it uses a weirdly designed API.
    Indeed, there is no direct way to insert, update or delete a specific record.
    Furthermore, there is no unique identifier for a record.
    Instead GoDaddy uses a replace approach: for a given set of records one
    can replace this set with a new set sent through the API.
    For the sake of simplicity and consistency across the provider edit methods,
    the set will always be all records in the DNS zone.
    With this approach:
    - adding a record consists in appending a record to the obtained set and calling
    replace with the updated set,
    - updating a record consists in modifying a record in the obtained set and calling
    replace with the updated set,
    - deleting a record consists in removing a record from the obtained set and calling
    replace with the updated set.
    In parallel, as said before, there is no unique identifier.
    This provider then implements a pseudo-identifier, to allow an easy update or delete
    using the '--identifier' lexicon parameter.
    But you need to call the 'list' command just before executing an update/delete action,
    because the identifier value is tied to the content of the record, and will change anytime
    something is changed in the record.
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Filled in by _authenticate() with the GoDaddy-side zone identifier.
        self.domain_id = None
        self.api_endpoint = 'https://api.godaddy.com/v1'
    def _authenticate(self):
        """Validate the credentials by fetching the domain, and store its id."""
        domain = self.domain
        result = self._get('/domains/{0}'.format(domain))
        self.domain_id = result['domainId']
    def _list_records(self, rtype=None, name=None, content=None):
        """List zone records, optionally filtered by rtype, name and content.

        rtype/name filtering is delegated to the API through the URL path;
        content filtering is applied client-side on the normalized entries.
        """
        domain = self.domain
        url = '/domains/{0}/records'.format(domain)
        if rtype:
            url += '/{0}'.format(rtype)
        if name:
            url += '/{0}'.format(self._relative_name(name))
        raws = self._get(url)
        records = []
        for raw in raws:
            records.append({
                'id': Provider._identifier(raw),
                'type': raw['type'],
                'name': self._full_name(raw['name']),
                'ttl': raw['ttl'],
                'content': raw['data']
            })
        if content:
            # Bug fix: the normalized dicts built above store the value under
            # the 'content' key (there is no 'data' key anymore), so filtering
            # on record['data'] raised KeyError whenever a content filter was
            # supplied.
            records = [
                record for record in records if record['content'] == content]
        LOGGER.debug('list_records: %s', records)
        return records
    def _create_record(self, rtype, name, content):
        """Insert a record by replacing the whole zone with the entry appended."""
        domain = self.domain
        relative_name = self._relative_name(name)
        ttl = self._get_lexicon_option('ttl')
        # Retrieve existing data in DNS zone.
        records = self._get('/domains/{0}/records'.format(domain))
        # Check if a record already matches given parameters
        for record in records:
            if (record['type'] == rtype and self._relative_name(record['name']) == relative_name
                    and record['data'] == content):
                LOGGER.debug(
                    'create_record (ignored, duplicate): %s %s %s', rtype, name, content)
                return True
        # Append a new entry corresponding to given parameters.
        data = {'type': rtype, 'name': relative_name, 'data': content}
        if ttl:
            data['ttl'] = ttl
        records.append(data)
        # Synchronize data with inserted record into DNS zone.
        self._put('/domains/{0}/records'.format(domain), records)
        LOGGER.debug('create_record: %s %s %s', rtype, name, content)
        return True
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update one record, selected by pseudo-identifier or by rtype+name.

        For the rtype/name approach we cannot update all matching records, as
        it would lead to an error (two entries of the same rtype + name cannot
        have the same content); so the first matching record whose content
        differs is updated before synchronizing the DNS zone.
        """
        if not identifier and not rtype:
            raise Exception('ERROR: rtype is required')
        if not identifier and not name:
            raise Exception('ERROR: name is required')
        domain = self.domain
        relative_name = None
        if name:
            relative_name = self._relative_name(name)
        # Retrieve existing data in DNS zone.
        records = self._get('/domains/{0}/records'.format(domain))
        # Get the record to update:
        # - either explicitly by its identifier,
        # - or the first matching by its rtype+name where content does not match
        #   (first match, see docstring for explanation).
        for record in records:
            if ((identifier and Provider._identifier(record) == identifier) or  # pylint: disable=too-many-boolean-expressions
                    (not identifier and record['type'] == rtype
                     and self._relative_name(record['name']) == relative_name
                     and record['data'] != content)):
                record['data'] = content
                break
        # Synchronize data with updated records into DNS zone.
        self._put('/domains/{0}/records'.format(domain), records)
        LOGGER.debug('update_record: %s %s %s', rtype, name, content)
        return True
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete matching records by replacing the whole zone without them.

        GoDaddy does not accept an empty array when updating a particular set
        of records, so instead of deleting a subset we fetch ALL records in the
        DNS zone, drop the matching ones, and replace EVERYTHING. A working
        zone always contains at least NS entries, so the pushed set is never
        empty in practice.
        """
        domain = self.domain
        # Retrieve all records in the DNS zone
        records = self._get('/domains/{0}/records'.format(domain))
        relative_name = None
        if name:
            relative_name = self._relative_name(name)
        # Keep every record that fails at least one *provided* criterion.
        # When neither identifier nor any criterion is provided, nothing
        # matches and the zone is left unchanged (same behavior as the
        # original exhaustive boolean enumeration it replaces).
        filtered_records = []
        if identifier:
            filtered_records = [
                record for record in records if Provider._identifier(record) != identifier]
        else:
            for record in records:
                is_matching = (
                    (rtype or relative_name or content)
                    and (not rtype or record['type'] == rtype)
                    and (not relative_name
                         or self._relative_name(record['name']) == relative_name)
                    and (not content or record['data'] == content))
                if not is_matching:
                    filtered_records.append(record)
        # Synchronize data with expurged entries into DNS zone.
        self._put('/domains/{0}/records'.format(domain), filtered_records)
        LOGGER.debug('delete_records: %s %s %s', rtype, name, content)
        return True
    # GoDaddy provides no identifier for a record, which is a problem
    # where identifiers can be used (delete and update).
    # To circumvent this, we implement a pseudo-identifier, which is basically
    # a hash of type+name+content of a given record.
    # It is far from perfect, as the identifier will change each time
    # we change something in the record ...
    # But at least, one can use 'lexicon godaddy list ...' then
    # 'lexicon godaddy update --identifier ...' to modify a specific record.
    # However, 'lexicon godaddy list ...' should be called each time the DNS
    # zone has been changed to calculate the new identifiers.
    @staticmethod
    def _identifier(record):
        """Return a 7-hex-char pseudo-identifier hashed from type/name/data."""
        sha256 = hashlib.sha256()
        sha256.update(('type=' + record.get('type', '') + ',').encode('utf-8'))
        sha256.update(('name=' + record.get('name', '') + ',').encode('utf-8'))
        sha256.update(('data=' + record.get('data', '') + ',').encode('utf-8'))
        return sha256.hexdigest()[0:7]
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Perform one authenticated HTTP call against the GoDaddy API.

        Returns the decoded JSON body, or None when the API answers with an
        empty body (e.g. on PUT). Raises requests.HTTPError on failure.
        """
        if not data:
            data = {}
        if not query_params:
            query_params = {}
        # When editing the DNS zone, the API is unavailable for a few seconds
        # (until modifications are propagated) and answers 409. Retry until we
        # get a processable response (any status that is not a 409).
        # NOTE: method_whitelist is the pre-urllib3-1.26 spelling of
        # allowed_methods; kept for compatibility with the pinned dependency.
        retries = Retry(
            total=10,
            backoff_factor=0.5,
            status_forcelist=[409],
            method_whitelist=frozenset(
                ['GET', 'PUT', 'POST', 'DELETE', 'PATCH'])
        )
        session = requests.Session()
        session.mount('https://', HTTPAdapter(max_retries=retries))
        result = session.request(action, self.api_endpoint + url,
                                 params=query_params,
                                 data=json.dumps(data),
                                 headers={
                                     'Content-Type': 'application/json',
                                     'Accept': 'application/json',
                                     # GoDaddy use a key/secret pair to authenticate
                                     'Authorization': 'sso-key {0}:{1}'.format(
                                         self._get_provider_option(
                                             'auth_key'),
                                         self._get_provider_option('auth_secret'))
                                 })
        result.raise_for_status()
        try:
            # Return the JSON body response if exists.
            return result.json()
        except ValueError:
            # For some requests command (eg. PUT), GoDaddy will not
            # return any JSON, just an HTTP status without body.
            return None
<|code_end|>
|
Provider Hetzner KonsoleH is broken
When using the latest version of lexicon, i.e.
```
$ lexicon --version
lexicon 3.3.11
```
there is a problem parsing the website:
```
$ lexicon hetzner create evk.services TXT --log_level DEBUG --name "_foo.foo2" --content "foo2"
Arguments: Namespace(action='create', auth_account=None, auth_password=None, auth_username=None, config_dir='/home/work/it/lexicon-github', content='foo2', delegated=None, domain='evk.services', identifier=None, latency=30, linked='yes', log_level='DEBUG', name='_foo.foo2', output='TABLE', priority=None, propagated='yes', provider_name='hetzner', ttl=None, type='TXT')
Warning: Use of environment variable LEXICON_HETZNER_USERNAME is deprecated. Try LEXICON_HETZNER_AUTH_USERNAME instead.
Warning: Use of environment variable LEXICON_HETZNER_PASSWORD is deprecated. Try LEXICON_HETZNER_AUTH_PASSWORD instead.
Hetzner => Enable CNAME lookup (see --linked parameter)
DNS Lookup => evk.services. IN SOA ns1.your-server.de. postmaster.your-server.de. 2019121911 10800 3600 604800 3600
DNS Lookup => ns1.your-server.de. IN A 213.133.106.251
DNS Lookup => ns1.your-server.de. IN AAAA 2a01:4f8:d0a:2006::2
DNS Lookup => evk.services. IN NS ns.second-ns.com.
DNS Lookup => evk.services. IN NS ns3.second-ns.de.
DNS Lookup => evk.services. IN NS ns1.your-server.de.
DNS Lookup => ns.second-ns.com. IN A 213.239.204.242
DNS Lookup => ns.second-ns.com. IN AAAA 2a01:4f8:0:a101::b:1
DNS Lookup => ns3.second-ns.de. IN A 193.47.99.4
DNS Lookup => ns3.second-ns.de. IN AAAA 2001:67c:192c::add:b3
DNS Lookup => ns1.your-server.de. IN A 213.133.106.251
DNS Lookup => ns1.your-server.de. IN AAAA 2a01:4f8:d0a:2006::2
DNS Lookup => evk.services IN NS 213.133.106.251 2a01:4f8:d0a:2006::2 213.239.204.242 2a01:4f8:0:a101::b:1 193.47.99.4 2001:67c:192c::add:b3
DNS Lookup => The DNS response does not contain an answer to the question: _foo.foo2.evk.services. IN CNAME
Starting new HTTPS connection (1): konsoleh.your-server.de:443
https://konsoleh.your-server.de:443 "GET / HTTP/1.1" 200 2184
https://konsoleh.your-server.de:443 "POST /login.php HTTP/1.1" 302 0
https://konsoleh.your-server.de:443 "GET / HTTP/1.1" 200 2019
Hetzner => Authenticate session with konsoleh account 'CXXXXXXXXX'
https://konsoleh.your-server.de:443 "GET /?page=1 HTTP/1.1" 200 2019
Hetzner => Get ID D1290385919 for domain evk.services
https://konsoleh.your-server.de:443 "GET /logout.php HTTP/1.1" 302 0
https://konsoleh.your-server.de:443 "GET /?err=logout HTTP/1.1" 200 2114
Hetzner => Exit session
Hetzner => Enable CNAME lookup (see --linked parameter)
DNS Lookup => evk.services. IN SOA ns1.your-server.de. postmaster.your-server.de. 2019121911 10800 3600 604800 3600
DNS Lookup => ns1.your-server.de. IN A 213.133.106.251
DNS Lookup => ns1.your-server.de. IN AAAA 2a01:4f8:d0a:2006::2
DNS Lookup => evk.services. IN NS ns3.second-ns.de.
DNS Lookup => evk.services. IN NS ns1.your-server.de.
DNS Lookup => evk.services. IN NS ns.second-ns.com.
DNS Lookup => ns3.second-ns.de. IN A 193.47.99.4
DNS Lookup => ns3.second-ns.de. IN AAAA 2001:67c:192c::add:b3
DNS Lookup => ns1.your-server.de. IN A 213.133.106.251
DNS Lookup => ns1.your-server.de. IN AAAA 2a01:4f8:d0a:2006::2
DNS Lookup => ns.second-ns.com. IN A 213.239.204.242
DNS Lookup => ns.second-ns.com. IN AAAA 2a01:4f8:0:a101::b:1
DNS Lookup => evk.services IN NS 213.133.106.251 2a01:4f8:d0a:2006::2 193.47.99.4 2001:67c:192c::add:b3 213.239.204.242 2a01:4f8:0:a101::b:1
DNS Lookup => The DNS response does not contain an answer to the question: _foo.foo2.evk.services. IN CNAME
Starting new HTTPS connection (1): konsoleh.your-server.de:443
https://konsoleh.your-server.de:443 "GET / HTTP/1.1" 200 2182
https://konsoleh.your-server.de:443 "POST /login.php HTTP/1.1" 302 0
https://konsoleh.your-server.de:443 "GET / HTTP/1.1" 200 2019
Hetzner => Authenticate session with konsoleh account 'CXXXXXXXX'
https://konsoleh.your-server.de:443 "GET /?domain_number=DXXXXXXX HTTP/1.1" 200 3285
https://konsoleh.your-server.de:443 "GET /dns.php?dnsaction2=editintextarea HTTP/1.1" 200 3744
https://konsoleh.your-server.de:443 "GET /logout.php HTTP/1.1" 302 0
https://konsoleh.your-server.de:443 "GET /?err=logout HTTP/1.1" 200 2115
Hetzner => Exit session
Traceback (most recent call last):
File "/home/work/it/lexicon-github/venv/bin/lexicon", line 11, in <module>
load_entry_point('dns-lexicon', 'console_scripts', 'lexicon')()
File "/home/work/it/lexicon-github/lexicon/cli.py", line 117, in main
results = client.execute()
File "/home/work/it/lexicon-github/lexicon/client.py", line 78, in execute
return self.provider.create_record(record_type, name, content)
File "/home/work/it/lexicon-github/lexicon/providers/base.py", line 80, in create_record
return self._create_record(rtype, name, content)
File "/home/work/it/lexicon-github/lexicon/providers/hetzner.py", line 180, in _create_record
with self._session(self.domain, self.domain_id) as ddata:
File "/usr/lib/python3.7/contextlib.py", line 112, in __enter__
return next(self.gen)
File "/home/work/it/lexicon-github/lexicon/providers/hetzner.py", line 606, in _session
raise exc
File "/home/work/it/lexicon-github/lexicon/providers/hetzner.py", line 603, in _session
zone = self._get_zone(qdomain, qdomain_id)
File "/home/work/it/lexicon-github/lexicon/providers/hetzner.py", line 691, in _get_zone
zone_file = Provider._filter_dom(dom, zone_file_filter).renderContents().decode('UTF-8')
AttributeError: 'NoneType' object has no attribute 'renderContents'
```
I do have some problems running the actual test-suite as well:
```
> cassette=self.cassette, failed_request=self._vcr_request
)
E vcr.errors.CannotOverwriteExistingCassetteException: Can't overwrite existing cassette ('tests/fixtures/cassettes/hetzner/KonsoleH-IntegrationTests/test_provider_authenticate.yaml') in your current record mode ('none').
E No match for the request (<Request (GET) https://konsoleh.your-server.de/>) was found.
E No similar requests, that have not been played, found.
../venv/lib/python3.7/site-packages/vcr/stubs/__init__.py:232: CannotOverwriteExistingCassetteException
_ HetznerKonsoleHProviderTests.test_provider_when_calling_create_record_for_A_with_valid_name_and_content _
```
Could someone please provide brief guidance on how to set the config YAML file or environment variables correctly for Hetzner konsoleH? Thanks.
| lexicon/providers/hetzner.py
<|code_start|>"""Module provider for Hetzner"""
from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
import hashlib
import logging
import re
import time
import requests
from six import string_types
from urllib3.util.retry import Retry
# Due to optional requirement
try:
from bs4 import BeautifulSoup
import dns.resolver
import dns.zone
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
# Empty list: no nameserver namespaces are registered, so the 'auto' provider
# cannot match Hetzner-managed zones by their NS records.
# NOTE(review): Hetzner zones appear to use your-server.de / second-ns.com /
# second-ns.de nameservers (see issue logs) — confirm whether this list was
# left empty intentionally.
NAMESERVER_DOMAINS = []
def provider_parser(subparser):
    """Add the Hetzner-specific command line options to the given subparser."""
    # Account selection and credentials (no defaults).
    subparser.add_argument(
        '--auth-account',
        help='specify type of Hetzner account: by default Hetzner Robot '
             '(robot) or Hetzner konsoleH (konsoleh)')
    subparser.add_argument(
        '--auth-username', help='specify username of Hetzner account')
    subparser.add_argument(
        '--auth-password', help='specify password of Hetzner account')
    # Behavioural switches, each with an explicit default.
    subparser.add_argument(
        '--linked',
        default='yes',
        choices=['yes', 'no'],
        help=('if exists, uses linked CNAME as A|AAAA|TXT record name for edit '
              'actions: by default (yes); Further restriction: Only enabled if '
              "record name or raw FQDN record identifier 'type/name/content' is "
              'specified, and additionally for update actions the record name '
              'remains the same'))
    subparser.add_argument(
        '--propagated',
        default='yes',
        choices=['yes', 'no'],
        help=('waits until record is publicly propagated after succeeded '
              'create|update actions: by default (yes)'))
    subparser.add_argument(
        '--latency',
        default=30,
        type=int,
        help=('specify latency, used during checks for publicly propagation '
              'and additionally for Hetzner Robot after record edits: by default '
              '30s (30)'))
class Provider(BaseProvider):
    """
    Implements the Hetzner DNS Provider.
    There are two variants to manage DNS records on Hetzner: Hetzner Robot or
    Hetzner konsoleH. Both do not provide a common API, therefore this provider
    implements missing read and write methods in a generic way. For editing DNS
    records on Hetzner, this provider manipulates and replaces the whole DNS zone.
    Furthermore, there is no unique identifier to each record in the way that Lexicon
    expects, why this provider implements a pseudo-identifer based on the record type,
    name and content for use of the --identifier parameter. Supported identifier
    formats are:
    - hash generated|verified by 'list' command; e.g. '30fa112'
    - raw concatenation of the record type, name (FQDN) and content (if possible
      FQDN) with delimiter '/'; e.g. 'SRV/example.com./0 0 443 msx.example.com.'
      or 'TXT/example.com./challengetoken'
    Additional, this provider implements the option of replacing an A, AAAA or TXT record
    name with an existent linked CNAME for edit actions via the --linked parameter and
    the option of waiting until record is publicly propagated after succeeded create or
    update actions via the --propagated parameter. As further restriction, the use of a
    linked CNAME is only enabled if the record type & record name or the raw identifier are
    specified, and additionally for the update action the record name remains the same.
    """

    def __init__(self, config):
        """
        Initialize the provider and validate credentials/account choice.

        :param config: Lexicon configuration resolver passed to BaseProvider.
        :raises AssertionError: if --auth-account is not 'robot'/'konsoleh',
            or if --auth-username/--auth-password are missing.
        """
        super(Provider, self).__init__(config)
        # Declarative description of the two Hetzner web frontends. Each account
        # type maps logical operations (auth, exit, domain_id, zone) to the URLs,
        # form field names, regexes and BeautifulSoup filters needed to scrape
        # them. All request/DOM helpers below are driven by this table.
        self.api = {
            'robot': {
                'endpoint': 'https://robot.your-server.de',
                # 'filter' locates the page area that contains error output.
                'filter': [{'name': 'div', 'attrs': {'id': 'center_col'}}],
                'auth': {
                    # Robot login is handled by the separate accounts portal.
                    'endpoint': 'https://accounts.hetzner.com',
                    'GET': {'url': '/login'},
                    'POST': {'url': '/login_check'},
                    'filter': [{'name': 'form', 'attrs': {'id': 'login-form'}}],
                    'user': '_username',
                    'pass': '_password'
                },
                'exit': {
                    'GET': {'url': '/login/logout/r/true'}
                },
                'domain_id': {
                    # '<index>' is replaced by the page number while paging.
                    'GET': {'url': '/dns/index/page/<index>'},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'center_col'}},
                        {'name': 'table', 'attrs': {'class': 'box_title'}}
                    ],
                    'domain': [{'name': 'td', 'attrs': {'class': 'title'}}],
                    # Domain ID is embedded in an onclick handler, e.g. "...('123')".
                    'id': {'attr': 'onclick', 'regex': r'\'(\d+)\''}
                },
                'zone': {
                    # '<id>' is replaced by the domain ID when fetching the zone.
                    'GET': [{'url': '/dns/update/id/<id>'}],
                    'POST': {'url': '/dns/update'},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'center_col'}},
                        {'name': 'ul', 'attrs': {'class': 'error_list'}}
                    ],
                    # Name of the <textarea>/form field carrying the zone file.
                    'file': 'zonefile'
                }
            },
            'konsoleh': {
                'endpoint': 'https://konsoleh.your-server.de',
                'filter': [{'name': 'div', 'attrs': {'id': 'content'}}],
                'auth': {
                    'GET': {},
                    'POST': {'url': '/login.php'},
                    'filter': [{'name': 'form', 'attrs': {'id': 'loginform'}}],
                    'user': 'login_user_inputbox',
                    'pass': 'login_pass_inputbox'
                },
                'exit': {
                    'GET': {'url': '/logout.php'}
                },
                'domain_id': {
                    'GET': {'params': {'page': '<index>'}},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'domainlist'}},
                        {'name': 'dl'},
                        {'name': 'a'}
                    ],
                    'domain': [{'name': 'strong'}],
                    # konsoleH IDs look like 'D123' inside an href query string.
                    'id': {'attr': 'href', 'regex': r'=(D\d+)'}
                },
                'zone': {
                    # Two sequential GETs: select the domain, then open the
                    # zone file editor.
                    'GET': [
                        {'params': {'domain_number': '<id>'}},
                        {'url': '/dns.php', 'params': {'dnsaction2': 'editintextarea'}}
                    ],
                    'POST': {'url': '/dns.php'},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'content'}},
                        {'name': 'div', 'attrs': {'class': 'error'}}
                    ],
                    'file': 'zone_file1'
                }
            }
        }
        # requests.Session, created lazily per operation by _session().
        self.session = None
        self.account = self._get_provider_option('auth_account')
        if self.account in (None, 'robot', 'konsoleh'):
            # Default to the Robot frontend when no account type was given.
            self.account = self.account if self.account else 'robot'
        else:
            LOGGER.error('Hetzner => Argument for --auth-account is invalid: \'%s\' '
                         '(choose from \'robot\' or \'konsoleh\')', self.account)
            raise AssertionError
        self.username = self._get_provider_option('auth_username')
        assert self.username is not None
        self.password = self._get_provider_option('auth_password')
        assert self.password is not None
    def _authenticate(self):
        """
        Connects to Hetzner account and returns, if authentification was
        successful and the domain or CNAME target is managed by this account.

        :return: True on success; _session raises on login failure or when
            the domain cannot be found in the account.
        """
        # Merely entering the session context proves both that the credentials
        # work and that the domain is manageable; the zone itself is not needed.
        with self._session(self.domain, get_zone=False):
            return True
    def _create_record(self, rtype, name, content):
        """
        Connects to Hetzner account, adds a new record to the zone and returns a
        boolean, if creation was successful or not. Needed record rtype, name and
        content for record to create.

        :param rtype: record type (e.g. 'A', 'TXT').
        :param name: record name; converted to FQDN.
        :param content: record content; converted via _convert_content.
        :return: True if the zone was updated (or the record already existed).
        """
        with self._session(self.domain, self.domain_id) as ddata:
            # Validate method parameters
            if not rtype or not name or not content:
                LOGGER.warning('Hetzner => Record has no rtype|name|content specified')
                return False
            # Add record to zone
            # Prefer the resolved linked CNAME target (see --linked) over the
            # user-supplied name.
            name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
            rrset = ddata['zone']['data'].get_rdataset(name, rdtype=rtype, create=True)
            for rdata in rrset:
                if self._convert_content(rtype, content) == rdata.to_text():
                    LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                                content)
                    return True
            # All rdatas of an rrset share one TTL: keep the existing TTL when
            # it is positive and smaller than the configured one.
            ttl = (rrset.ttl if 0 < rrset.ttl < self._get_lexicon_option('ttl')
                   else self._get_lexicon_option('ttl'))
            rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype,
                                              ttl, self._convert_content(rtype, content))
            rrset.update(rdataset)
            # Post zone to Hetzner
            synced_change = self._post_zone(ddata['zone'])
            if synced_change:
                # Optionally block until the record is visible on the public
                # nameservers (see --propagated).
                self._propagated_record(rtype, name, self._convert_content(rtype, content),
                                        ddata['nameservers'])
            return synced_change
def _list_records(self, rtype=None, name=None, content=None):
"""
Connects to Hetzner account and returns a list of records filtered by record
rtype, name and content. The list is empty if no records found.
"""
with self._session(self.domain, self.domain_id) as ddata:
name = self._fqdn_name(name) if name else None
return self._list_records_in_zone(ddata['zone']['data'], rtype, name, content)
    def _update_record(self, identifier=None, rtype=None, name=None, content=None):  # pylint: disable=too-many-locals,too-many-branches
        """
        Connects to Hetzner account, changes an existing record and returns a boolean,
        if update was successful or not. Needed identifier or rtype & name to lookup
        over all records of the zone for exactly one record to update.

        :param identifier: pseudo-identifier (hash or raw 'type/name/content').
        :param rtype: record type; overrides the identifier's type when given.
        :param name: record name; overrides the identifier's name when given.
        :param content: new record content.
        :return: True if exactly one record matched and the zone update synced.
        """
        with self._session(self.domain, self.domain_id) as ddata:
            # Validate method parameters
            if identifier:
                # Resolve the identifier to the concrete record it points at;
                # explicit rtype/name/content arguments win over resolved values.
                dtype, dname, dcontent = self._parse_identifier(identifier, ddata['zone']['data'])
                if dtype and dname and dcontent:
                    rtype = rtype if rtype else dtype
                    name = name if name else dname
                    content = content if content else dcontent
                else:
                    LOGGER.warning('Hetzner => Record with identifier \'%s\' does not exist',
                                   identifier)
                    return False
            elif rtype and name and content:
                # Without an identifier, look the old record up by type & name
                # only (dcontent stays None so any content matches).
                dtype, dname, dcontent = rtype, name, None
            else:
                LOGGER.warning('Hetzner => Record has no rtype|name|content specified')
                return False
            dname = ddata['cname'] if ddata['cname'] else self._fqdn_name(dname)
            records = self._list_records_in_zone(ddata['zone']['data'], dtype, dname, dcontent)
            # Only proceed when the lookup is unambiguous.
            if len(records) == 1:
                # Remove record from zone
                rrset = ddata['zone']['data'].get_rdataset(records[0]['name'] + '.',
                                                           rdtype=records[0]['type'])
                # Keep every rdata of the rrset except the one being replaced.
                rdatas = []
                for rdata in rrset:
                    if self._convert_content(records[0]['type'],
                                             records[0]['content']) != rdata.to_text():
                        rdatas.append(rdata.to_text())
                if rdatas:
                    rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                           records[0]['ttl'], rdatas)
                    ddata['zone']['data'].replace_rdataset(records[0]['name'] + '.', rdataset)
                else:
                    # The rrset would be empty: drop it entirely.
                    ddata['zone']['data'].delete_rdataset(records[0]['name'] + '.',
                                                          records[0]['type'])
                # Add record to zone
                name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
                rrset = ddata['zone']['data'].get_rdataset(name, rdtype=rtype, create=True)
                synced_change = False
                for rdata in rrset:
                    if self._convert_content(rtype, content) == rdata.to_text():
                        LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                                    content)
                        synced_change = True
                        break
                if not synced_change:
                    # Same TTL heuristic as _create_record.
                    ttl = (rrset.ttl if 0 < rrset.ttl < self._get_lexicon_option('ttl')
                           else self._get_lexicon_option('ttl'))
                    rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype, ttl,
                                                      self._convert_content(rtype, content))
                    rrset.update(rdataset)
                # Post zone to Hetzner
                synced_change = self._post_zone(ddata['zone'])
                if synced_change:
                    self._propagated_record(rtype, name, self._convert_content(rtype, content),
                                            ddata['nameservers'])
                return synced_change
            LOGGER.warning('Hetzner => Record lookup has not only one match')
            return False
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """
        Connects to Hetzner account, removes an existing record from the zone and returns a
        boolean, if deletion was successful or not. Uses identifier or rtype, name & content to
        lookup over all records of the zone for one or more records to delete.

        :param identifier: pseudo-identifier; when given it fully determines
            rtype, name and content.
        :return: True if the zone update synced, or if nothing matched
            (deleting a non-existent record is treated as success).
        """
        with self._session(self.domain, self.domain_id) as ddata:
            # Validate method parameters
            if identifier:
                rtype, name, content = self._parse_identifier(identifier, ddata['zone']['data'])
                if rtype is None or name is None or content is None:
                    # Nothing to delete: report success, matching Lexicon's
                    # idempotent delete semantics.
                    LOGGER.info('Hetzner => Record with identifier \'%s\' does not exist',
                                identifier)
                    return True
            name = ddata['cname'] if ddata['cname'] else (self._fqdn_name(name) if name else None)
            records = self._list_records_in_zone(ddata['zone']['data'], rtype, name, content)
            if records:
                # Remove records from zone
                for record in records:
                    rrset = ddata['zone']['data'].get_rdataset(record['name'] + '.',
                                                               rdtype=record['type'])
                    # Keep every rdata except the one being deleted.
                    rdatas = []
                    for rdata in rrset:
                        if self._convert_content(record['type'],
                                                 record['content']) != rdata.to_text():
                            rdatas.append(rdata.to_text())
                    if rdatas:
                        rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                               record['ttl'], rdatas)
                        ddata['zone']['data'].replace_rdataset(record['name'] + '.', rdataset)
                    else:
                        # The rrset would be empty: drop it entirely.
                        ddata['zone']['data'].delete_rdataset(record['name'] + '.', record['type'])
                # Post zone to Hetzner
                synced_change = self._post_zone(ddata['zone'])
                return synced_change
            LOGGER.info('Hetzner => Record lookup has no matches')
            return True
###############################################################################
# Provider base helpers
###############################################################################
@staticmethod
def _create_identifier(rdtype, name, content):
"""
Creates hashed identifier based on full qualified record type, name & content
and returns hash.
"""
sha256 = hashlib.sha256()
sha256.update((rdtype + '/').encode('UTF-8'))
sha256.update((name + '/').encode('UTF-8'))
sha256.update(content.encode('UTF-8'))
return sha256.hexdigest()[0:7]
    def _parse_identifier(self, identifier, zone=None):
        """
        Parses the record identifier and returns type, name & content of the associated record
        as tuple. The tuple is empty if no associated record found.

        :param identifier: either a 7-char hash produced by _create_identifier,
            or the raw 'type/name/content' form.
        :param zone: zone object, required to resolve hash identifiers.
        :return: (rdtype, name, content) tuple; all None if not resolvable.
        """
        rdtype, name, content = None, None, None
        # Hash identifiers are exactly 7 characters; anything longer is
        # treated as the raw 'type/name/content' form.
        if len(identifier) > 7:
            parts = identifier.split('/')
            # Content may itself contain '/', so rejoin everything after the
            # second delimiter.
            rdtype, name, content = parts[0], parts[1], '/'.join(parts[2:])
        else:
            # Hash form: scan all zone records for a matching pseudo-ID.
            records = self._list_records_in_zone(zone)
            for record in records:
                if record['id'] == identifier:
                    rdtype, name, content = record['type'], record['name'] + '.', record['content']
        return rdtype, name, content
def _convert_content(self, rdtype, content):
"""
Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content.
"""
if rdtype == 'TXT':
if content[0] != '"':
content = '"' + content
if content[-1] != '"':
content += '"'
if rdtype in ('CNAME', 'MX', 'NS', 'SRV'):
if content[-1] != '.':
content = self._fqdn_name(content)
return content
    def _list_records_in_zone(self, zone, rdtype=None, name=None, content=None):
        """
        Iterates over all records of the zone and returns a list of records filtered
        by record type, name and content. The list is empty if no records found.

        :param zone: dnspython zone object (may be None, yielding []).
        :param rdtype: optional record type filter.
        :param name: optional FQDN name filter.
        :param content: optional content filter, compared in converted form.
        :return: list of dicts with 'type', 'name' (relative, no trailing dot),
            'ttl', 'content' and pseudo-'id'.
        """
        records = []
        rrsets = zone.iterate_rdatasets() if zone else []
        for rname, rdataset in rrsets:
            rtype = dns.rdatatype.to_text(rdataset.rdtype)
            # Filter on type and name at the rrset level...
            if ((not rdtype or rdtype == rtype)
                    and (not name or name == rname.to_text())):
                # ...then on content at the individual rdata level.
                for rdata in rdataset:
                    rdata = rdata.to_text()
                    if not content or self._convert_content(rtype, content) == rdata:
                        # Strip TXT quoting for the returned content value.
                        raw_rdata = self._clean_TXT_record({'type': rtype,
                                                            'content': rdata})['content']
                        data = {
                            'type': rtype,
                            # to_text(True) omits the trailing dot.
                            'name': rname.to_text(True),
                            'ttl': int(rdataset.ttl),
                            'content': raw_rdata,
                            # Pseudo-identifier, since Hetzner has no real one.
                            'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
                        }
                        records.append(data)
        return records
def _request(self, action='GET', url='/', data=None, query_params=None):
"""
Requests to Hetzner by current session and returns the response.
"""
if data is None:
data = {}
if query_params is None:
query_params = {}
response = self.session.request(action, self.api[self.account]['endpoint'] + url,
params=query_params, data=data)
response.raise_for_status()
return response
###############################################################################
# Provider option helpers
###############################################################################
    @staticmethod
    def _dns_lookup(name, rdtype, nameservers=None):
        """
        Looks on specified or default system domain nameservers to resolve record type
        & name and returns record set. The record set is empty if no propagated
        record found.

        :param name: fully qualified name to resolve.
        :param rdtype: record type to query.
        :param nameservers: optional list of nameserver IPs.
        :return: a dnspython rrset; empty on any resolver failure.
        """
        # Start with an empty rrset so lookup failures yield an empty result
        # instead of raising (0/1 are placeholder TTL and rdclass IN).
        rrset = dns.rrset.from_text(name, 0, 1, rdtype)
        try:
            resolver = dns.resolver.Resolver()
            # Keep lookups fast; propagation polling retries anyway.
            resolver.lifetime = 1
            if nameservers:
                resolver.nameservers = nameservers
            rrset = resolver.query(name, rdtype)
            for rdata in rrset:
                LOGGER.debug('DNS Lookup => %s %s %s %s',
                             rrset.name.to_text(), dns.rdataclass.to_text(rrset.rdclass),
                             dns.rdatatype.to_text(rrset.rdtype), rdata.to_text())
        except dns.exception.DNSException as error:
            # Treat NXDOMAIN/timeouts/etc. as "not propagated yet".
            LOGGER.debug('DNS Lookup => %s', error)
        return rrset
    @staticmethod
    def _get_nameservers(domain):
        """
        Looks for domain nameservers and returns the IPs of the nameservers as a list.
        The list is empty, if no nameservers were found. Needed associated domain zone
        name for lookup.

        :param domain: zone name to find authoritative nameservers for.
        :return: de-duplicated list of nameserver IPv4/IPv6 addresses.
        """
        nameservers = []
        # Query both SOA and NS so at least one source of NS host names is
        # available, then resolve each host name to A and AAAA addresses.
        rdtypes_ns = ['SOA', 'NS']
        rdtypes_ip = ['A', 'AAAA']
        for rdtype_ns in rdtypes_ns:
            for rdata_ns in Provider._dns_lookup(domain, rdtype_ns):
                for rdtype_ip in rdtypes_ip:
                    # SOA rdata starts with the primary nameserver host name;
                    # split(' ')[0] extracts it (a no-op for NS rdata).
                    for rdata_ip in Provider._dns_lookup(rdata_ns.to_text().split(' ')[0],
                                                         rdtype_ip):
                        if rdata_ip.to_text() not in nameservers:
                            nameservers.append(rdata_ip.to_text())
        LOGGER.debug('DNS Lookup => %s IN NS %s', domain, ' '.join(nameservers))
        return nameservers
    @staticmethod
    def _get_dns_cname(name, link=False):
        """
        Looks for associated domain zone, nameservers and linked record name until no
        more linked record name was found for the given fully qualified record name or
        the CNAME lookup was disabled, and then returns the parameters as a tuple.

        :param name: fully qualified record name to start from.
        :param link: whether to follow CNAME chains (see --linked).
        :return: (zone name, nameserver IPs, final CNAME target or None).
        :raises AssertionError: if the CNAME chain exceeds 5 links.
        """
        resolver = dns.resolver.Resolver()
        resolver.lifetime = 1
        # Determine the zone that actually contains the record.
        domain = dns.resolver.zone_for_name(name, resolver=resolver).to_text(True)
        nameservers = Provider._get_nameservers(domain)
        cname = None
        # Cap chain length to guard against CNAME loops.
        links, max_links = 0, 5
        while link:
            if links >= max_links:
                LOGGER.error('Hetzner => Record %s has more than %d linked CNAME '
                             'records. Reduce the amount of CNAME links!',
                             name, max_links)
                raise AssertionError
            # Follow the chain from the most recently found target.
            qname = cname if cname else name
            rrset = Provider._dns_lookup(qname, 'CNAME', nameservers)
            if rrset:
                links += 1
                cname = rrset[0].to_text()
                # The target may live in a different zone; re-resolve zone and
                # nameservers when it does.
                qdomain = dns.resolver.zone_for_name(cname, resolver=resolver).to_text(True)
                if domain != qdomain:
                    domain = qdomain
                    nameservers = Provider._get_nameservers(qdomain)
            else:
                # End of the chain.
                link = False
        if cname:
            LOGGER.info('Hetzner => Record %s has CNAME %s', name, cname)
        return domain, nameservers, cname
    def _link_record(self):
        """
        Checks restrictions for use of CNAME lookup and returns a tuple of the
        fully qualified record name to lookup and a boolean, if a CNAME lookup
        should be done or not. The fully qualified record name is empty if no
        record name is specified by this provider.

        :return: (fqdn name or None, follow-CNAME flag).
        """
        action = self._get_lexicon_option('action')
        identifier = self._get_lexicon_option('identifier')
        rdtype = self._get_lexicon_option('type')
        name = (self._fqdn_name(self._get_lexicon_option('name'))
                if self._get_lexicon_option('name') else None)
        link = self._get_provider_option('linked')
        # Remember the originally requested name: for updates the CNAME lookup
        # is only allowed when the name is not being changed.
        qname = name
        if identifier:
            # An identifier overrides type & name from the command line.
            rdtype, name, _ = self._parse_identifier(identifier)
        # CNAME linking only makes sense for edit actions on record types
        # that may legitimately sit behind a CNAME, and only when enabled.
        if action != 'list' and rdtype in ('A', 'AAAA', 'TXT') and name and link == 'yes':
            if action != 'update' or name == qname or not qname:
                LOGGER.info('Hetzner => Enable CNAME lookup '
                            '(see --linked parameter)')
                return name, True
        LOGGER.info('Hetzner => Disable CNAME lookup '
                    '(see --linked parameter)')
        return name, False
def _propagated_record(self, rdtype, name, content, nameservers=None):
"""
If the publicly propagation check should be done, waits until the domain nameservers
responses with the propagated record type, name & content and returns a boolean,
if the publicly propagation was successful or not.
"""
latency = self._get_provider_option('latency')
propagated = self._get_provider_option('propagated')
if propagated == 'yes':
retry, max_retry = 0, 20
while retry < max_retry:
for rdata in Provider._dns_lookup(name, rdtype, nameservers):
if content == rdata.to_text():
LOGGER.info('Hetzner => Record %s has %s %s', name, rdtype, content)
return True
retry += 1
retry_log = (', retry ({}/{}) in {}s...'.format((retry + 1), max_retry, latency)
if retry < max_retry else '')
LOGGER.info('Hetzner => Record is not propagated%s', retry_log)
time.sleep(latency)
return False
###############################################################################
# Hetzner API helpers
###############################################################################
    @staticmethod
    def _filter_dom(dom, filters, last_find_all=False):
        """
        If not exists, creates an DOM from a given session response, then filters the DOM
        via given API filters and returns the filtered DOM. The DOM is empty if the filters
        have no match.

        :param dom: HTML string or an already-parsed BeautifulSoup node.
        :param filters: list of {'name': ..., 'attrs': ...} find specs, applied
            in order, each narrowing the previous result.
        :param last_find_all: if True, the final filter uses find_all and a
            list of tags is returned instead of a single tag.
        :return: matching tag(s), or a falsy value if any filter misses.
        """
        # Lazily parse raw HTML; already-parsed nodes are filtered directly.
        if isinstance(dom, string_types):
            dom = BeautifulSoup(dom, 'html.parser')
        for idx, find in enumerate(filters, start=1):
            if not dom:
                # A previous filter found nothing; stop early.
                break
            name, attrs = find.get('name'), find.get('attrs', {})
            if len(filters) == idx and last_find_all:
                dom = dom.find_all(name, attrs=attrs) if name else dom.find_all(attrs=attrs)
            else:
                dom = dom.find(name, attrs=attrs) if name else dom.find(attrs=attrs)
        return dom
@staticmethod
def _extract_hidden_data(dom):
"""
Extracts hidden input data from DOM and returns the data as dictionary.
"""
input_tags = dom.find_all('input', attrs={'type': 'hidden'})
data = {}
for input_tag in input_tags:
data[input_tag['name']] = input_tag['value']
return data
@staticmethod
def _extract_domain_id(string, regex):
"""
Extracts domain ID from given string and returns the domain ID.
"""
regex = re.compile(regex)
match = regex.search(string)
if not match:
return False
return str(match.group(1))
@contextmanager
def _session(self, domain, domain_id=None, get_zone=True):
"""
Generates, authenticates and exits session to Hetzner account, and
provides tuple of additional needed domain data (domain nameservers,
zone and linked record name) to public methods. The tuple parameters
are empty if not existent or specified. Exits session and raises error
if provider fails during session.
"""
name, link = self._link_record()
qdomain, nameservers, cname = Provider._get_dns_cname(
(name if name else domain + '.'), link)
qdomain_id, zone = domain_id, None
self.session = self._auth_session(self.username, self.password)
try:
if not domain_id or qdomain != domain:
qdomain_id = self._get_domain_id(qdomain)
if qdomain == domain:
self.domain_id = qdomain_id
if get_zone:
zone = self._get_zone(qdomain, qdomain_id)
yield {'nameservers': nameservers, 'zone': zone, 'cname': cname}
except Exception as exc:
raise exc
finally:
self._exit_session()
    def _auth_session(self, username, password):
        """
        Creates session to Hetzner account, authenticates with given credentials and
        returns the session, if authentication was successful. Otherwise raises error.

        :param username: account user name.
        :param password: account password.
        :return: an authenticated requests.Session.
        :raises AssertionError: if the login form is still present after the
            POST, i.e. the credentials were rejected.
        """
        api = self.api[self.account]['auth']
        # Robot authenticates via a separate portal; fall back to the account
        # endpoint when no dedicated auth endpoint is configured.
        endpoint = api.get('endpoint', self.api[self.account]['endpoint'])
        session = requests.Session()
        # Retry transient failures with exponential backoff.
        session_retries = Retry(total=10, backoff_factor=0.5)
        session_adapter = requests.adapters.HTTPAdapter(max_retries=session_retries)
        session.mount('https://', session_adapter)
        # Fetch the login page to collect hidden form fields (CSRF tokens etc.)
        # before posting the credentials.
        response = session.request('GET', endpoint + api['GET'].get('url', '/'))
        dom = Provider._filter_dom(response.text, api['filter'])
        data = Provider._extract_hidden_data(dom)
        data[api['user']], data[api['pass']] = username, password
        response = session.request('POST', endpoint + api['POST']['url'], data=data)
        # Success is detected by the absence of the login form in the response.
        if Provider._filter_dom(response.text, api['filter']):
            LOGGER.error('Hetzner => Unable to authenticate session with %s account \'%s\': '
                         'Invalid credentials',
                         self.account, username)
            raise AssertionError
        LOGGER.info('Hetzner => Authenticate session with %s account \'%s\'',
                    self.account, username)
        return session
def _exit_session(self):
"""
Exits session to Hetzner account and returns.
"""
api = self.api[self.account]
response = self._get(api['exit']['GET']['url'])
if not Provider._filter_dom(response.text, api['filter']):
LOGGER.info('Hetzner => Exit session')
else:
LOGGER.warning('Hetzner => Unable to exit session')
self.session = None
return True
    def _get_domain_id(self, domain):
        """
        Pulls all domains managed by authenticated Hetzner account, extracts their IDs
        and returns the ID for the current domain, if exists. Otherwise raises error.

        :param domain: domain name to find.
        :return: the Hetzner-internal domain ID string.
        :raises AssertionError: if the domain is not in the account.
        """
        api = self.api[self.account]['domain_id']
        qdomain = dns.name.from_text(domain).to_unicode(True)
        # Page through the domain list until a page adds no new domains.
        domains, last_count, page = {}, -1, 0
        while last_count != len(domains):
            last_count = len(domains)
            page += 1
            # '<index>' placeholders in the URL/params select the page.
            url = (api['GET'].copy()).get('url', '/').replace('<index>', str(page))
            params = api['GET'].get('params', {}).copy()
            for param in params:
                params[param] = params[param].replace('<index>', str(page))
            response = self._get(url, query_params=params)
            domain_tags = Provider._filter_dom(response.text, api['filter'], True)
            for domain_tag in domain_tags:
                domain_id = Provider._extract_domain_id(dict(domain_tag.attrs)[api['id']['attr']],
                                                        api['id']['regex'])
                # NOTE: this rebinds the 'domain' parameter to the scraped
                # domain name; the original argument is preserved in qdomain.
                domain = (Provider._filter_dom(domain_tag, api['domain'])
                          .renderContents().decode('UTF-8'))
                domains[domain] = domain_id
                if domain == qdomain:
                    LOGGER.info('Hetzner => Get ID %s for domain %s', domain_id, qdomain)
                    return domain_id
        LOGGER.error('Hetzner => ID for domain %s does not exists', qdomain)
        raise AssertionError
    def _get_zone(self, domain, domain_id):
        """
        Pulls the zone for the current domain from authenticated Hetzner account and
        returns it as an zone object.

        :param domain: domain name, used as the zone origin.
        :param domain_id: Hetzner-internal domain ID.
        :return: dict with 'data' (parsed dnspython zone) and 'hidden' (form
            fields required to post the zone back).
        """
        api = self.api[self.account]
        # konsoleH needs two sequential GETs (select domain, open editor);
        # Robot needs one. The list in the config drives this.
        for request in api['zone']['GET']:
            url = (request.copy()).get('url', '/').replace('<id>', domain_id)
            params = request.get('params', {}).copy()
            for param in params:
                params[param] = params[param].replace('<id>', domain_id)
            response = self._get(url, query_params=params)
        # Extract the zone file from the editor textarea of the last response.
        dom = Provider._filter_dom(response.text, api['filter'])
        zone_file_filter = [{'name': 'textarea', 'attrs': {'name': api['zone']['file']}}]
        zone_file = Provider._filter_dom(dom, zone_file_filter).renderContents().decode('UTF-8')
        # Keep the hidden form fields; they must accompany the POST later.
        hidden = Provider._extract_hidden_data(dom)
        zone = {'data': dns.zone.from_text(zone_file, origin=domain, relativize=False),
                'hidden': hidden}
        LOGGER.info('Hetzner => Get zone for domain %s', domain)
        return zone
    def _post_zone(self, zone):
        """
        Pushes updated zone for current domain to authenticated Hetzner account and
        returns a boolean, if update was successful or not. Furthermore, waits until
        the zone has been taken over, if it is a Hetzner Robot account.

        :param zone: dict with 'data' (dnspython zone) and 'hidden' form fields.
        :return: True if Hetzner accepted the zone, False on syntax errors.
        """
        api = self.api[self.account]['zone']
        # Re-submit the hidden form fields together with the serialized zone.
        data = zone['hidden']
        data[api['file']] = zone['data'].to_text(relativize=True)
        response = self._post(api['POST']['url'], data=data)
        # Hetzner signals rejection by rendering an error list on the page.
        if Provider._filter_dom(response.text, api['filter']):
            LOGGER.error('Hetzner => Unable to update zone for domain %s: Syntax error\n\n%s',
                         zone['data'].origin.to_unicode(True),
                         zone['data'].to_text(relativize=True).decode('UTF-8'))
            return False
        LOGGER.info('Hetzner => Update zone for domain %s',
                    zone['data'].origin.to_unicode(True))
        if self.account == 'robot':
            # Robot applies zone changes asynchronously; wait so a follow-up
            # read does not see the stale zone.
            latency = self._get_provider_option('latency')
            LOGGER.info('Hetzner => Wait %ds until Hetzner Robot has taken over zone...',
                        latency)
            time.sleep(latency)
        return True
<|code_end|>
setup.py
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir

# Single-source the version from the VERSION file at the repository root.
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
    version = version_file.read().strip()

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Get a list of all the providers
# Provider names are derived from the module file names in lexicon/providers,
# excluding the package marker and the abstract base implementation.
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(sorted(set(providers)))
providers.remove('base')
providers.remove('__init__')

# Define optional dependencies for specific providers.
# Each key of the dict should match a provider name.
extras_require = {
    'namecheap': ['PyNamecheap'],
    'route53': ['boto3'],
    'softlayer': ['SoftLayer'],
    'subreg': ['zeep'],
    'gransy': ['zeep'],
    'transip': ['transip>=0.3.0'],
    'plesk': ['xmltodict'],
    'henet': ['beautifulsoup4'],
    'hetzner': ['dnspython>=1.15.0', 'beautifulsoup4'],
    'easyname': ['beautifulsoup4'],
    'localzone': ['localzone'],
    'gratisdns': ['beautifulsoup4'],
    # Define dev/test dependencies
    'dev': [
        'pytest>=5.2 ; python_version >= "3.0"',
        'pytest>=4.6 ; python_version >= "2.7"',
        'pytest-cov>=2.8',
        'pytest-xdist>=1.30',
        'vcrpy>=2.1',
        'mock>=3.0',
    ]
}
# Add a 'full' extra, gathering all external dependencies for providers
extras_require['full'] = [dep for name, deps in extras_require.items() if name != 'dev' for dep in deps]

setup(
    name='dns-lexicon',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=version,
    description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
    long_description=long_description,
    long_description_content_type="text/markdown",
    # The project's main homepage.
    url='https://github.com/AnalogJ/lexicon',
    # Author details
    author='Jason Kulatunga',
    author_email='jason@thesparktree.com',
    license='MIT',
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet :: Name Service (DNS)',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    # Provider names are appended so each provider is discoverable on PyPI.
    keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        'requests[security]',
        'tldextract',
        'future',
        'cryptography',
        'pyyaml',
    ],
    extras_require=extras_require,
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'lexicon=lexicon.cli:main',
        ],
    },
    test_suite='tests'
)
<|code_end|>
| lexicon/providers/hetzner.py
<|code_start|>"""Module provider for Hetzner"""
from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
import hashlib
import logging
import re
import time
import requests
from six import string_types
from urllib3.util.retry import Retry
# Due to optional requirement
try:
from bs4 import BeautifulSoup
import dns.resolver
import dns.zone
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = []
def provider_parser(subparser):
    """Configure a provider parser for Hetzner"""
    subparser.add_argument(
        '--auth-account',
        help='specify type of Hetzner account: by default Hetzner Robot '
             '(robot) or Hetzner konsoleH (konsoleh)')
    subparser.add_argument('--auth-username', help='specify username of Hetzner account')
    subparser.add_argument('--auth-password', help='specify password of Hetzner account')
    subparser.add_argument(
        '--linked',
        choices=['yes', 'no'],
        default='yes',
        help='if exists, uses linked CNAME as A|AAAA|TXT record name for edit '
             'actions: by default (yes); Further restriction: Only enabled if '
             'record name or raw FQDN record identifier \'type/name/content\' is '
             'specified, and additionally for update actions the record name '
             'remains the same')
    subparser.add_argument(
        '--propagated',
        choices=['yes', 'no'],
        default='yes',
        help='waits until record is publicly propagated after succeeded '
             'create|update actions: by default (yes)')
    subparser.add_argument(
        '--latency',
        type=int,
        default=30,
        help='specify latency, used during checks for publicly propagation '
             'and additionally for Hetzner Robot after record edits: by default '
             '30s (30)')
class Provider(BaseProvider):
    """
    Implements the Hetzner DNS Provider.
    There are two variants to manage DNS records on Hetzner: Hetzner Robot or
    Hetzner konsoleH. Both do not provide a common API, therefore this provider
    implements missing read and write methods in a generic way. For editing DNS
    records on Hetzner, this provider manipulates and replaces the whole DNS zone.
    Furthermore, there is no unique identifier to each record in the way that Lexicon
    expects, why this provider implements a pseudo-identifer based on the record type,
    name and content for use of the --identifier parameter. Supported identifier
    formats are:
    - hash generated|verified by 'list' command; e.g. '30fa112'
    - raw concatenation of the record type, name (FQDN) and content (if possible
      FQDN) with delimiter '/'; e.g. 'SRV/example.com./0 0 443 msx.example.com.'
      or 'TXT/example.com./challengetoken'
    Additional, this provider implements the option of replacing an A, AAAA or TXT record
    name with an existent linked CNAME for edit actions via the --linked parameter and
    the option of waiting until record is publicly propagated after succeeded create or
    update actions via the --propagated parameter. As further restriction, the use of a
    linked CNAME is only enabled if the record type & record name or the raw identifier are
    specified, and additionally for the update action the record name remains the same.
    """

    def __init__(self, config):
        """
        Initialize the provider and validate credentials/account choice.

        :param config: Lexicon configuration resolver passed to BaseProvider.
        :raises AssertionError: if --auth-account is not 'robot'/'konsoleh',
            or if --auth-username/--auth-password are missing.
        """
        super(Provider, self).__init__(config)
        # Declarative description of the two Hetzner web frontends. Each account
        # type maps logical operations (auth, exit, domain_id, zone) to the URLs,
        # form field names, regexes and BeautifulSoup filters needed to scrape
        # them. All request/DOM helpers below are driven by this table.
        self.api = {
            'robot': {
                'endpoint': 'https://robot.your-server.de',
                # 'filter' locates the page area that contains error output.
                'filter': [{'name': 'div', 'attrs': {'id': 'center_col'}}],
                'auth': {
                    # Robot login is handled by the separate accounts portal.
                    'endpoint': 'https://accounts.hetzner.com',
                    'GET': {'url': '/login'},
                    'POST': {'url': '/login_check'},
                    'filter': [{'name': 'form', 'attrs': {'id': 'login-form'}}],
                    'user': '_username',
                    'pass': '_password'
                },
                'exit': {
                    'GET': {'url': '/login/logout/r/true'}
                },
                'domain_id': {
                    # '<index>' is replaced by the page number while paging.
                    'GET': {'url': '/dns/index/page/<index>'},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'center_col'}},
                        {'name': 'table', 'attrs': {'class': 'box_title'}}
                    ],
                    'domain': [{'name': 'td', 'attrs': {'class': 'title'}}],
                    # Domain ID is embedded in an onclick handler, e.g. "...('123')".
                    'id': {'attr': 'onclick', 'regex': r'\'(\d+)\''}
                },
                'zone': {
                    # '<id>' is replaced by the domain ID when fetching the zone.
                    'GET': [{'url': '/dns/update/id/<id>'}],
                    'POST': {'url': '/dns/update'},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'center_col'}},
                        {'name': 'ul', 'attrs': {'class': 'error_list'}}
                    ],
                    # Name of the <textarea>/form field carrying the zone file.
                    'file': 'zonefile'
                }
            },
            'konsoleh': {
                'endpoint': 'https://konsoleh.your-server.de',
                'filter': [{'name': 'div', 'attrs': {'id': 'content'}}],
                'auth': {
                    'GET': {},
                    'POST': {'url': '/login.php'},
                    'filter': [{'name': 'form', 'attrs': {'id': 'loginform'}}],
                    'user': 'login_user_inputbox',
                    'pass': 'login_pass_inputbox'
                },
                'exit': {
                    'GET': {'url': '/logout.php'}
                },
                'domain_id': {
                    'GET': {'params': {'page': '<index>'}},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'domainlist'}},
                        {'name': 'dl'},
                        {'name': 'a'}
                    ],
                    'domain': [{'name': 'strong'}],
                    # konsoleH IDs look like 'D123' inside an href query string.
                    'id': {'attr': 'href', 'regex': r'=(D\d+)'}
                },
                'zone': {
                    # Two sequential GETs: select the domain, then open the
                    # zone file editor.
                    'GET': [
                        {'params': {'domain_number': '<id>'}},
                        {'url': '/dns.php', 'params': {'dnsaction2': 'editintextarea'}}
                    ],
                    'POST': {'url': '/dns.php'},
                    'filter': [
                        {'name': 'div', 'attrs': {'id': 'content'}},
                        {'name': 'div', 'attrs': {'class': 'error'}}
                    ],
                    'file': 'zone_file1'
                }
            }
        }
        # requests.Session, created lazily per operation by _session().
        self.session = None
        self.account = self._get_provider_option('auth_account')
        if self.account in (None, 'robot', 'konsoleh'):
            # Default to the Robot frontend when no account type was given.
            self.account = self.account if self.account else 'robot'
        else:
            LOGGER.error('Hetzner => Argument for --auth-account is invalid: \'%s\' '
                         '(choose from \'robot\' or \'konsoleh\')', self.account)
            raise AssertionError
        self.username = self._get_provider_option('auth_username')
        assert self.username is not None
        self.password = self._get_provider_option('auth_password')
        assert self.password is not None
def _authenticate(self):
    """
    Connects to the Hetzner account and returns True if authentication was
    successful and the domain or CNAME target is managed by this account.
    """
    # _session() raises (AssertionError / HTTPError) if login fails or the
    # domain cannot be resolved to a managed zone, so reaching the body of
    # the context manager proves the credentials and domain are valid.
    with self._session(self.domain, get_zone=False):
        return True
def _create_record(self, rtype, name, content):
    """
    Connects to Hetzner account, adds a new record to the zone and returns a
    boolean, if creation was successful or not. Needed record rtype, name and
    content for record to create.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if not rtype or not name or not content:
            LOGGER.warning('Hetzner => Record has no rtype|name|content specified')
            return False
        # Add record to zone
        # If CNAME lookup is enabled, the linked target overrides the name.
        name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
        rrset = ddata['zone']['data'].get_rdataset(name, rdtype=rtype, create=True)
        for rdata in rrset:
            if self._convert_content(rtype, content) == rdata.to_text():
                # Idempotent: an identical existing record counts as success.
                LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                            content)
                return True
        # Keep an existing positive TTL when it is below the configured one.
        ttl = (rrset.ttl if 0 < rrset.ttl < self._get_lexicon_option('ttl')
               else self._get_lexicon_option('ttl'))
        rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype,
                                          ttl, self._convert_content(rtype, content))
        rrset.update(rdataset)
        # Post zone to Hetzner
        synced_change = self._post_zone(ddata['zone'])
        if synced_change:
            # Optionally block until the change is publicly resolvable
            # (controlled by the 'propagated' provider option).
            self._propagated_record(rtype, name, self._convert_content(rtype, content),
                                    ddata['nameservers'])
        return synced_change
def _list_records(self, rtype=None, name=None, content=None):
    """
    Connects to Hetzner account and returns a list of records filtered by record
    rtype, name and content. The list is empty if no records found.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # The zone file is fetched once; filtering happens offline on the
        # parsed dnspython zone object.
        name = self._fqdn_name(name) if name else None
        return self._list_records_in_zone(ddata['zone']['data'], rtype, name, content)
def _update_record(self, identifier=None, rtype=None, name=None, content=None):  # pylint: disable=too-many-locals,too-many-branches
    """
    Connects to Hetzner account, changes an existing record and returns a boolean,
    if update was successful or not. Needed identifier or rtype & name to lookup
    over all records of the zone for exactly one record to update.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if identifier:
            # Resolve the identifier into the record's current type/name/content;
            # explicit arguments take precedence over the resolved values.
            dtype, dname, dcontent = self._parse_identifier(identifier, ddata['zone']['data'])
            if dtype and dname and dcontent:
                rtype = rtype if rtype else dtype
                name = name if name else dname
                content = content if content else dcontent
            else:
                LOGGER.warning('Hetzner => Record with identifier \'%s\' does not exist',
                               identifier)
                return False
        elif rtype and name and content:
            dtype, dname, dcontent = rtype, name, None
        else:
            LOGGER.warning('Hetzner => Record has no rtype|name|content specified')
            return False
        dname = ddata['cname'] if ddata['cname'] else self._fqdn_name(dname)
        records = self._list_records_in_zone(ddata['zone']['data'], dtype, dname, dcontent)
        # Only an unambiguous single match may be updated.
        if len(records) == 1:
            # Remove record from zone
            rrset = ddata['zone']['data'].get_rdataset(records[0]['name'] + '.',
                                                       rdtype=records[0]['type'])
            rdatas = []
            for rdata in rrset:
                if self._convert_content(records[0]['type'],
                                         records[0]['content']) != rdata.to_text():
                    rdatas.append(rdata.to_text())
            if rdatas:
                # Other rdatas share the rrset: rewrite the set without the
                # matched record instead of deleting the whole set.
                rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                       records[0]['ttl'], rdatas)
                ddata['zone']['data'].replace_rdataset(records[0]['name'] + '.', rdataset)
            else:
                ddata['zone']['data'].delete_rdataset(records[0]['name'] + '.',
                                                      records[0]['type'])
            # Add record to zone
            name = ddata['cname'] if ddata['cname'] else self._fqdn_name(name)
            rrset = ddata['zone']['data'].get_rdataset(name, rdtype=rtype, create=True)
            synced_change = False
            for rdata in rrset:
                if self._convert_content(rtype, content) == rdata.to_text():
                    # Target state already present: nothing to post.
                    LOGGER.info('Hetzner => Record with content \'%s\' already exists',
                                content)
                    synced_change = True
                    break
            if not synced_change:
                ttl = (rrset.ttl if 0 < rrset.ttl < self._get_lexicon_option('ttl')
                       else self._get_lexicon_option('ttl'))
                rdataset = dns.rdataset.from_text(rrset.rdclass, rrset.rdtype, ttl,
                                                  self._convert_content(rtype, content))
                rrset.update(rdataset)
                # Post zone to Hetzner
                synced_change = self._post_zone(ddata['zone'])
            if synced_change:
                self._propagated_record(rtype, name, self._convert_content(rtype, content),
                                        ddata['nameservers'])
            return synced_change
        LOGGER.warning('Hetzner => Record lookup has not only one match')
        return False
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """
    Connects to Hetzner account, removes an existing record from the zone and returns a
    boolean, if deletion was successful or not. Uses identifier or rtype, name & content to
    lookup over all records of the zone for one or more records to delete.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if identifier:
            rtype, name, content = self._parse_identifier(identifier, ddata['zone']['data'])
            if rtype is None or name is None or content is None:
                # Deleting a non-existent record is treated as success.
                LOGGER.info('Hetzner => Record with identifier \'%s\' does not exist',
                            identifier)
                return True
        name = ddata['cname'] if ddata['cname'] else (self._fqdn_name(name) if name else None)
        records = self._list_records_in_zone(ddata['zone']['data'], rtype, name, content)
        if records:
            # Remove records from zone
            for record in records:
                rrset = ddata['zone']['data'].get_rdataset(record['name'] + '.',
                                                           rdtype=record['type'])
                rdatas = []
                for rdata in rrset:
                    if self._convert_content(record['type'],
                                             record['content']) != rdata.to_text():
                        rdatas.append(rdata.to_text())
                if rdatas:
                    # Keep sibling rdatas by rewriting the set without the match.
                    rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                           record['ttl'], rdatas)
                    ddata['zone']['data'].replace_rdataset(record['name'] + '.', rdataset)
                else:
                    ddata['zone']['data'].delete_rdataset(record['name'] + '.', record['type'])
            # Post zone to Hetzner
            synced_change = self._post_zone(ddata['zone'])
            return synced_change
        LOGGER.info('Hetzner => Record lookup has no matches')
        return True
###############################################################################
# Provider base helpers
###############################################################################
@staticmethod
def _create_identifier(rdtype, name, content):
"""
Creates hashed identifier based on full qualified record type, name & content
and returns hash.
"""
sha256 = hashlib.sha256()
sha256.update((rdtype + '/').encode('UTF-8'))
sha256.update((name + '/').encode('UTF-8'))
sha256.update(content.encode('UTF-8'))
return sha256.hexdigest()[0:7]
def _parse_identifier(self, identifier, zone=None):
    """
    Parses the record identifier and returns type, name & content of the associated record
    as tuple. The tuple is empty if no associated record found.
    """
    rdtype, name, content = None, None, None
    if len(identifier) > 7:
        # Composite identifier of the form '<type>/<name>/<content>';
        # hashed identifiers from _create_identifier are exactly 7 chars.
        parts = identifier.split('/')
        rdtype, name, content = parts[0], parts[1], '/'.join(parts[2:])
    else:
        # Hashed identifier: resolve it by scanning all records of the zone.
        records = self._list_records_in_zone(zone)
        for record in records:
            if record['id'] == identifier:
                rdtype, name, content = record['type'], record['name'] + '.', record['content']
    return rdtype, name, content
def _convert_content(self, rdtype, content):
"""
Converts type dependent record content into well formed and fully qualified
content for domain zone and returns content.
"""
if rdtype == 'TXT':
if content[0] != '"':
content = '"' + content
if content[-1] != '"':
content += '"'
if rdtype in ('CNAME', 'MX', 'NS', 'SRV'):
if content[-1] != '.':
content = self._fqdn_name(content)
return content
def _list_records_in_zone(self, zone, rdtype=None, name=None, content=None):
    """
    Iterates over all records of the zone and returns a list of records filtered
    by record type, name and content. The list is empty if no records found.
    """
    records = []
    rrsets = zone.iterate_rdatasets() if zone else []
    for rname, rdataset in rrsets:
        rtype = dns.rdatatype.to_text(rdataset.rdtype)
        if ((not rdtype or rdtype == rtype)
                and (not name or name == rname.to_text())):
            for rdata in rdataset:
                rdata = rdata.to_text()
                # Compare against the normalized form so quoting/FQDN
                # differences do not hide matches.
                if not content or self._convert_content(rtype, content) == rdata:
                    # Strip TXT quoting before exposing content to callers.
                    raw_rdata = self._clean_TXT_record({'type': rtype,
                                                        'content': rdata})['content']
                    data = {
                        'type': rtype,
                        'name': rname.to_text(True),
                        'ttl': int(rdataset.ttl),
                        'content': raw_rdata,
                        'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
                    }
                    records.append(data)
    return records
def _request(self, action='GET', url='/', data=None, query_params=None):
    """
    Sends a request through the current session to the configured Hetzner
    endpoint and returns the response. Raises on an HTTP error status.
    """
    # Fall back to empty payloads without mutating the caller's arguments.
    response = self.session.request(action,
                                    self.api[self.account]['endpoint'] + url,
                                    params=query_params or {},
                                    data=data or {})
    response.raise_for_status()
    return response
###############################################################################
# Provider option helpers
###############################################################################
@staticmethod
def _dns_lookup(name, rdtype, nameservers=None):
    """
    Looks on specified or default system domain nameservers to resolve record type
    & name and returns record set. The record set is empty if no propagated
    record found.
    """
    # Pre-build an empty rrset for the queried name/type so callers always
    # receive an iterable, even when resolution fails below.
    rrset = dns.rrset.from_text(name, 0, 1, rdtype)
    try:
        resolver = dns.resolver.Resolver()
        resolver.lifetime = 1
        if nameservers:
            resolver.nameservers = nameservers
        rrset = resolver.query(name, rdtype)
        for rdata in rrset:
            LOGGER.debug('DNS Lookup => %s %s %s %s',
                         rrset.name.to_text(), dns.rdataclass.to_text(rrset.rdclass),
                         dns.rdatatype.to_text(rrset.rdtype), rdata.to_text())
    except dns.exception.DNSException as error:
        # Any resolver failure (timeout, NXDOMAIN, ...) is non-fatal and
        # simply yields the empty rrset prepared above.
        LOGGER.debug('DNS Lookup => %s', error)
    return rrset
@staticmethod
def _get_nameservers(domain):
    """
    Looks for domain nameservers and returns the IPs of the nameservers as a list.
    The list is empty, if no nameservers were found. Needed associated domain zone
    name for lookup.
    """
    nameservers = []
    rdtypes_ns = ['SOA', 'NS']
    rdtypes_ip = ['A', 'AAAA']
    for rdtype_ns in rdtypes_ns:
        for rdata_ns in Provider._dns_lookup(domain, rdtype_ns):
            for rdtype_ip in rdtypes_ip:
                # SOA rdata is space separated; the first token is the
                # primary nameserver hostname to resolve.
                for rdata_ip in Provider._dns_lookup(rdata_ns.to_text().split(' ')[0],
                                                     rdtype_ip):
                    # Deduplicate while preserving discovery order.
                    if rdata_ip.to_text() not in nameservers:
                        nameservers.append(rdata_ip.to_text())
    LOGGER.debug('DNS Lookup => %s IN NS %s', domain, ' '.join(nameservers))
    return nameservers
@staticmethod
def _get_dns_cname(name, link=False):
    """
    Looks for associated domain zone, nameservers and linked record name until no
    more linked record name was found for the given fully qualified record name or
    the CNAME lookup was disabled, and then returns the parameters as a tuple.
    """
    resolver = dns.resolver.Resolver()
    resolver.lifetime = 1
    domain = dns.resolver.zone_for_name(name, resolver=resolver).to_text(True)
    nameservers = Provider._get_nameservers(domain)
    cname = None
    # Follow at most max_links chained CNAMEs to avoid lookup loops.
    links, max_links = 0, 5
    while link:
        if links >= max_links:
            LOGGER.error('Hetzner => Record %s has more than %d linked CNAME '
                         'records. Reduce the amount of CNAME links!',
                         name, max_links)
            raise AssertionError
        qname = cname if cname else name
        rrset = Provider._dns_lookup(qname, 'CNAME', nameservers)
        if rrset:
            links += 1
            cname = rrset[0].to_text()
            # A CNAME may point into a different zone; re-resolve the zone
            # and its nameservers in that case.
            qdomain = dns.resolver.zone_for_name(cname, resolver=resolver).to_text(True)
            if domain != qdomain:
                domain = qdomain
                nameservers = Provider._get_nameservers(qdomain)
        else:
            link = False
    if cname:
        LOGGER.info('Hetzner => Record %s has CNAME %s', name, cname)
    return domain, nameservers, cname
def _link_record(self):
    """
    Checks restrictions for use of CNAME lookup and returns a tuple of the
    fully qualified record name to lookup and a boolean, if a CNAME lookup
    should be done or not. The fully qualified record name is empty if no
    record name is specified by this provider.
    """
    action = self._get_lexicon_option('action')
    identifier = self._get_lexicon_option('identifier')
    rdtype = self._get_lexicon_option('type')
    name = (self._fqdn_name(self._get_lexicon_option('name'))
            if self._get_lexicon_option('name') else None)
    link = self._get_provider_option('linked')
    qname = name
    if identifier:
        # An identifier overrides the type/name options.
        rdtype, name, _ = self._parse_identifier(identifier)
    # CNAME following only applies to record types that may sit behind a
    # link (A/AAAA/TXT), must be explicitly enabled, and is skipped for
    # 'list' as well as for an 'update' that renames the record.
    if action != 'list' and rdtype in ('A', 'AAAA', 'TXT') and name and link == 'yes':
        if action != 'update' or name == qname or not qname:
            LOGGER.info('Hetzner => Enable CNAME lookup '
                        '(see --linked parameter)')
            return name, True
    LOGGER.info('Hetzner => Disable CNAME lookup '
                '(see --linked parameter)')
    return name, False
def _propagated_record(self, rdtype, name, content, nameservers=None):
    """
    If the publicly propagation check should be done, waits until the domain nameservers
    responses with the propagated record type, name & content and returns a boolean,
    if the publicly propagation was successful or not.
    """
    latency = self._get_provider_option('latency')
    propagated = self._get_provider_option('propagated')
    if propagated == 'yes':
        # Poll up to max_retry times, sleeping 'latency' seconds between
        # attempts, until the record is visible on the nameservers.
        retry, max_retry = 0, 20
        while retry < max_retry:
            for rdata in Provider._dns_lookup(name, rdtype, nameservers):
                if content == rdata.to_text():
                    LOGGER.info('Hetzner => Record %s has %s %s', name, rdtype, content)
                    return True
            retry += 1
            retry_log = (', retry ({}/{}) in {}s...'.format((retry + 1), max_retry, latency)
                         if retry < max_retry else '')
            LOGGER.info('Hetzner => Record is not propagated%s', retry_log)
            time.sleep(latency)
    # Either the check is disabled or the record never showed up in time.
    return False
###############################################################################
# Hetzner API helpers
###############################################################################
@staticmethod
def _filter_dom(dom, filters, last_find_all=False):
    """
    If not exists, creates an DOM from a given session response, then filters the DOM
    via given API filters and returns the filtered DOM. The DOM is empty if the filters
    have no match.
    """
    # Raw HTML strings are parsed lazily into a BeautifulSoup DOM.
    if isinstance(dom, string_types):
        dom = BeautifulSoup(dom, 'html5lib')
    # Apply each filter as a chained find(); optionally return all matches
    # for the final filter instead of only the first one.
    for idx, find in enumerate(filters, start=1):
        if not dom:
            break
        name, attrs = find.get('name'), find.get('attrs', {})
        if len(filters) == idx and last_find_all:
            dom = dom.find_all(name, attrs=attrs) if name else dom.find_all(attrs=attrs)
        else:
            dom = dom.find(name, attrs=attrs) if name else dom.find(attrs=attrs)
    return dom
@staticmethod
def _extract_hidden_data(dom):
"""
Extracts hidden input data from DOM and returns the data as dictionary.
"""
input_tags = dom.find_all('input', attrs={'type': 'hidden'})
data = {}
for input_tag in input_tags:
data[input_tag['name']] = input_tag['value']
return data
@staticmethod
def _extract_domain_id(string, regex):
"""
Extracts domain ID from given string and returns the domain ID.
"""
regex = re.compile(regex)
match = regex.search(string)
if not match:
return False
return str(match.group(1))
@contextmanager
def _session(self, domain, domain_id=None, get_zone=True):
    """
    Generates, authenticates and exits a session to the Hetzner account, and
    provides a dict of additional needed domain data (domain nameservers,
    zone and linked record name) to the public methods. The dict values are
    empty if not existent or specified. The session is always exited, even
    if the provider fails while the session is open.
    """
    name, link = self._link_record()
    qdomain, nameservers, cname = Provider._get_dns_cname(
        (name if name else domain + '.'), link)
    qdomain_id, zone = domain_id, None
    self.session = self._auth_session(self.username, self.password)
    try:
        # Resolve the zone ID lazily; a cached ID is only reused when it
        # belongs to the queried domain itself (no CNAME redirection).
        if not domain_id or qdomain != domain:
            qdomain_id = self._get_domain_id(qdomain)
        if qdomain == domain:
            self.domain_id = qdomain_id
        if get_zone:
            zone = self._get_zone(qdomain, qdomain_id)
        yield {'nameservers': nameservers, 'zone': zone, 'cname': cname}
    finally:
        # try/finally alone guarantees logout; the former
        # 'except Exception as exc: raise exc' clause was a no-op that only
        # added a frame to the traceback, so it has been removed.
        self._exit_session()
def _auth_session(self, username, password):
    """
    Creates session to Hetzner account, authenticates with given credentials and
    returns the session, if authentication was successful. Otherwise raises error.
    """
    api = self.api[self.account]['auth']
    endpoint = api.get('endpoint', self.api[self.account]['endpoint'])
    session = requests.Session()
    # Retry transient HTTPS failures with exponential backoff.
    session_retries = Retry(total=10, backoff_factor=0.5)
    session_adapter = requests.adapters.HTTPAdapter(max_retries=session_retries)
    session.mount('https://', session_adapter)
    # Fetch the login form first to pick up hidden fields (e.g. CSRF tokens).
    response = session.request('GET', endpoint + api['GET'].get('url', '/'))
    dom = Provider._filter_dom(response.text, api['filter'])
    data = Provider._extract_hidden_data(dom)
    data[api['user']], data[api['pass']] = username, password
    response = session.request('POST', endpoint + api['POST']['url'], data=data)
    # If the login form is still present in the response, the login failed.
    if Provider._filter_dom(response.text, api['filter']):
        LOGGER.error('Hetzner => Unable to authenticate session with %s account \'%s\': '
                     'Invalid credentials',
                     self.account, username)
        raise AssertionError
    LOGGER.info('Hetzner => Authenticate session with %s account \'%s\'',
                self.account, username)
    return session
def _exit_session(self):
    """
    Exits session to Hetzner account and returns.
    """
    api = self.api[self.account]
    response = self._get(api['exit']['GET']['url'])
    # After logout the account content area must be gone from the page.
    if not Provider._filter_dom(response.text, api['filter']):
        LOGGER.info('Hetzner => Exit session')
    else:
        LOGGER.warning('Hetzner => Unable to exit session')
    # Drop the session object either way so it cannot be reused.
    self.session = None
    return True
def _get_domain_id(self, domain):
    """
    Pulls all domains managed by authenticated Hetzner account, extracts their IDs
    and returns the ID for the current domain, if exists. Otherwise raises error.
    """
    api = self.api[self.account]['domain_id']
    qdomain = dns.name.from_text(domain).to_unicode(True)
    # Scrape the paginated domain list; the loop stops as soon as a page
    # contributes no new domains (i.e. pagination is exhausted).
    domains, last_count, page = {}, -1, 0
    while last_count != len(domains):
        last_count = len(domains)
        page += 1
        # Substitute the page number into URL and query parameters.
        url = (api['GET'].copy()).get('url', '/').replace('<index>', str(page))
        params = api['GET'].get('params', {}).copy()
        for param in params:
            params[param] = params[param].replace('<index>', str(page))
        response = self._get(url, query_params=params)
        domain_tags = Provider._filter_dom(response.text, api['filter'], True)
        for domain_tag in domain_tags:
            domain_id = Provider._extract_domain_id(dict(domain_tag.attrs)[api['id']['attr']],
                                                    api['id']['regex'])
            domain = (Provider._filter_dom(domain_tag, api['domain'])
                      .renderContents().decode('UTF-8'))
            domains[domain] = domain_id
            if domain == qdomain:
                LOGGER.info('Hetzner => Get ID %s for domain %s', domain_id, qdomain)
                return domain_id
    LOGGER.error('Hetzner => ID for domain %s does not exists', qdomain)
    raise AssertionError
def _get_zone(self, domain, domain_id):
    """
    Pulls the zone for the current domain from authenticated Hetzner account and
    returns it as an zone object.
    """
    api = self.api[self.account]
    # Some accounts need a sequence of GETs to reach the zone editor page;
    # only the last response contains the zone file form.
    for request in api['zone']['GET']:
        url = (request.copy()).get('url', '/').replace('<id>', domain_id)
        params = request.get('params', {}).copy()
        for param in params:
            params[param] = params[param].replace('<id>', domain_id)
        response = self._get(url, query_params=params)
    dom = Provider._filter_dom(response.text, api['filter'])
    zone_file_filter = [{'name': 'textarea', 'attrs': {'name': api['zone']['file']}}]
    zone_file = Provider._filter_dom(dom, zone_file_filter).renderContents().decode('UTF-8')
    # Hidden form fields must be sent back unchanged when posting the zone.
    hidden = Provider._extract_hidden_data(dom)
    zone = {'data': dns.zone.from_text(zone_file, origin=domain, relativize=False),
            'hidden': hidden}
    LOGGER.info('Hetzner => Get zone for domain %s', domain)
    return zone
def _post_zone(self, zone):
    """
    Pushes updated zone for current domain to authenticated Hetzner account and
    returns a boolean, if update was successful or not. Furthermore, waits until
    the zone has been taken over, if it is a Hetzner Robot account.
    """
    api = self.api[self.account]['zone']
    # Re-submit the hidden form fields together with the edited zone file.
    data = zone['hidden']
    data[api['file']] = zone['data'].to_text(relativize=True)
    response = self._post(api['POST']['url'], data=data)
    # An error container in the response DOM indicates a rejected zone file.
    if Provider._filter_dom(response.text, api['filter']):
        LOGGER.error('Hetzner => Unable to update zone for domain %s: Syntax error\n\n%s',
                     zone['data'].origin.to_unicode(True),
                     zone['data'].to_text(relativize=True).decode('UTF-8'))
        return False
    LOGGER.info('Hetzner => Update zone for domain %s',
                zone['data'].origin.to_unicode(True))
    if self.account == 'robot':
        # Robot applies zone changes asynchronously; wait before returning
        # so follow-up reads see the new zone.
        latency = self._get_provider_option('latency')
        LOGGER.info('Hetzner => Wait %ds until Hetzner Robot has taken over zone...',
                    latency)
        time.sleep(latency)
    return True
<|code_end|>
setup.py
<|code_start|>"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(sorted(set(providers)))
providers.remove('base')
providers.remove('__init__')
# Define optional dependencies for specific providers.
# Each key of the dict should match a provider name.
extras_require = {
'namecheap': ['PyNamecheap'],
'route53': ['boto3'],
'softlayer': ['SoftLayer'],
'subreg': ['zeep'],
'gransy': ['zeep'],
'transip': ['transip>=0.3.0'],
'plesk': ['xmltodict'],
'henet': ['beautifulsoup4'],
'hetzner': ['dnspython>=1.15.0', 'beautifulsoup4', 'html5lib'],
'easyname': ['beautifulsoup4'],
'localzone': ['localzone'],
'gratisdns': ['beautifulsoup4'],
# Define dev/test dependencies
'dev': [
'pytest>=5.2 ; python_version >= "3.0"',
'pytest>=4.6 ; python_version >= "2.7"',
'pytest-cov>=2.8',
'pytest-xdist>=1.30',
'vcrpy>=2.1',
'mock>=3.0',
]
}
# Add a 'full' extra, gathering all external dependencies for providers
extras_require['full'] = [dep for name, deps in extras_require.items() if name != 'dev' for dep in deps]
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
long_description_content_type="text/markdown",
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
license='MIT',
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'requests[security]',
'tldextract',
'future',
'cryptography',
'pyyaml',
],
extras_require=extras_require,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.cli:main',
],
},
test_suite='tests'
)
<|code_end|>
|
Cloudflare provider only lists first DNS 100 entries for domain
How to reproduce:
* Have a CloudFlare domain with > 100 entries
* Create a new entry starting with a "high" letter, e.g. `zzzz.<domain>`
* List entries: only the first 100 entries will be returned, making it appear as though `zzzz` had not been created.
```
def _list_records(self, rtype=None, name=None, content=None):
filter_obj = {'per_page': 100} ####### NOTE
if rtype:
filter_obj['type'] = rtype
if name:
filter_obj['name'] = self._full_name(name)
if content:
filter_obj['content'] = content
payload = self._get(
'/zones/{0}/dns_records'.format(self.domain_id), filter_obj)
### subsequent pages not retrieved
records = []
for record in payload['result']:
processed_record = {
'type': record['type'],
'name': record['name'],
'ttl': record['ttl'],
'content': record['content'],
'id': record['id']
}
records.append(processed_record)
LOGGER.debug('list_records: %s', records)
return records
```
This causes confusing errors whereby an existing entry is not returned if it falls after the 100th entry.
Without specifying the "per page" parameter, only the first 20 entries are returned.
Besides a quick-n-dirty solution of raising that number to e.g. 1000, the proper solution would be to implement proper pagination support for the _get method.
| lexicon/providers/cloudflare.py
<|code_start|>"""Module provider for Cloudflare"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['cloudflare.com']
def provider_parser(subparser):
    """Return the parser for this provider"""
    # Register each Cloudflare credential flag with its help text.
    for flag, description in (
            ("--auth-username", "specify email address for authentication"),
            ("--auth-token", "specify token for authentication")):
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Provider class for Cloudflare"""

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = 'https://api.cloudflare.com/client/v4'

    def _authenticate(self):
        """Resolve the configured domain to its zone ID and store it."""
        payload = self._get('/zones', {
            'name': self.domain,
            'status': 'active'
        })
        if not payload['result']:
            raise Exception('No domain found')
        if len(payload['result']) > 1:
            raise Exception('Too many domains found. This should not happen')
        self.domain_id = payload['result'][0]['id']

    # Create record. If record already exists with the same content, do nothing'
    def _create_record(self, rtype, name, content):
        data = {'type': rtype, 'name': self._full_name(
            name), 'content': content}
        if self._get_lexicon_option('ttl'):
            data['ttl'] = self._get_lexicon_option('ttl')
        payload = {'success': True}
        try:
            payload = self._post(
                '/zones/{0}/dns_records'.format(self.domain_id), data)
        except requests.exceptions.HTTPError as err:
            # API error 81057 means an identical record already exists;
            # treat that as success to keep creation idempotent.
            already_exists = next((True for error in err.response.json()[
                'errors'] if error['code'] == 81057), False)
            if not already_exists:
                raise
        LOGGER.debug('create_record: %s', payload['success'])
        return payload['success']

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        filter_obj = {'per_page': 100}
        if rtype:
            filter_obj['type'] = rtype
        if name:
            filter_obj['name'] = self._full_name(name)
        if content:
            filter_obj['content'] = content
        records = []
        # The API caps 'per_page' at 100, so walk every result page via the
        # 'result_info' metadata; previously only the first page was fetched
        # and any record past the 100th silently went missing.
        while True:
            payload = self._get(
                '/zones/{0}/dns_records'.format(self.domain_id), filter_obj)
            for record in payload['result']:
                processed_record = {
                    'type': record['type'],
                    'name': record['name'],
                    'ttl': record['ttl'],
                    'content': record['content'],
                    'id': record['id']
                }
                records.append(processed_record)
            pages = payload['result_info']['total_pages']
            page = payload['result_info']['page']
            if page >= pages:
                break
            filter_obj['page'] = page + 1
        LOGGER.debug('list_records: %s', records)
        return records

    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        data = {}
        if rtype:
            data['type'] = rtype
        if name:
            data['name'] = self._full_name(name)
        if content:
            data['content'] = content
        if self._get_lexicon_option('ttl'):
            data['ttl'] = self._get_lexicon_option('ttl')
        payload = self._put(
            '/zones/{0}/dns_records/{1}'.format(self.domain_id, identifier), data)
        LOGGER.debug('update_record: %s', payload['success'])
        return payload['success']

    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        delete_record_id = []
        if not identifier:
            # Without an explicit ID, delete every record matching the filter.
            records = self._list_records(rtype, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        LOGGER.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            self._delete(
                '/zones/{0}/dns_records/{1}'.format(self.domain_id, record_id))
        LOGGER.debug('delete_record: %s', True)
        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(action, self.api_endpoint + url, params=query_params,
                                    data=json.dumps(data),
                                    headers={
                                        'X-Auth-Email': self._get_provider_option('auth_username'),
                                        'X-Auth-Key': self._get_provider_option('auth_token'),
                                        'Content-Type': 'application/json'
                                    })
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()
<|code_end|>
| lexicon/providers/cloudflare.py
<|code_start|>"""Module provider for Cloudflare"""
from __future__ import absolute_import
import json
import logging
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['cloudflare.com']
def provider_parser(subparser):
    """Return the parser for this provider"""
    # Register each Cloudflare credential flag with its help text.
    for flag, description in (
            ("--auth-username", "specify email address for authentication"),
            ("--auth-token", "specify token for authentication")):
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
"""Provider class for Cloudflare"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.api_endpoint = 'https://api.cloudflare.com/client/v4'
def _authenticate(self):
payload = self._get('/zones', {
'name': self.domain,
'status': 'active'
})
if not payload['result']:
raise Exception('No domain found')
if len(payload['result']) > 1:
raise Exception('Too many domains found. This should not happen')
self.domain_id = payload['result'][0]['id']
# Create record. If record already exists with the same content, do nothing'
def _create_record(self, rtype, name, content):
data = {'type': rtype, 'name': self._full_name(
name), 'content': content}
if self._get_lexicon_option('ttl'):
data['ttl'] = self._get_lexicon_option('ttl')
payload = {'success': True}
try:
payload = self._post(
'/zones/{0}/dns_records'.format(self.domain_id), data)
except requests.exceptions.HTTPError as err:
already_exists = next((True for error in err.response.json()[
'errors'] if error['code'] == 81057), False)
if not already_exists:
raise
LOGGER.debug('create_record: %s', payload['success'])
return payload['success']
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def _list_records(self, rtype=None, name=None, content=None):
filter_obj = {'per_page': 100}
if rtype:
filter_obj['type'] = rtype
if name:
filter_obj['name'] = self._full_name(name)
if content:
filter_obj['content'] = content
records = []
while True:
payload = self._get(
'/zones/{0}/dns_records'.format(self.domain_id), filter_obj)
LOGGER.debug("payload: %s", payload)
for record in payload['result']:
processed_record = {
'type': record['type'],
'name': record['name'],
'ttl': record['ttl'],
'content': record['content'],
'id': record['id']
}
records.append(processed_record)
pages = payload['result_info']['total_pages']
page = payload['result_info']['page']
if page >= pages:
break
filter_obj['page'] = page + 1
LOGGER.debug('list_records: %s', records)
LOGGER.debug('Number of records retrieved: %d', len(records))
return records
# Update an existing record in place; only the supplied fields change.
def _update_record(self, identifier, rtype=None, name=None, content=None):
    """Update the record with the given Cloudflare id."""
    changes = {}
    if rtype:
        changes['type'] = rtype
    if name:
        changes['name'] = self._full_name(name)
    if content:
        changes['content'] = content
    ttl = self._get_lexicon_option('ttl')
    if ttl:
        changes['ttl'] = ttl
    payload = self._put(
        '/zones/{0}/dns_records/{1}'.format(self.domain_id, identifier), changes)
    LOGGER.debug('update_record: %s', payload['success'])
    return payload['success']
# Delete records by explicit id, or by filter when no id is given.
# A filter that matches nothing is a no-op and still returns True.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """Delete matching DNS records; succeed silently when none exist."""
    if identifier:
        targets = [identifier]
    else:
        targets = [record['id']
                   for record in self._list_records(rtype, name, content)]
    LOGGER.debug('delete_records: %s', targets)
    for record_id in targets:
        self._delete(
            '/zones/{0}/dns_records/{1}'.format(self.domain_id, record_id))
    LOGGER.debug('delete_record: %s', True)
    return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
    """Send an authenticated JSON request to the Cloudflare API."""
    body = {} if data is None else data
    params = {} if query_params is None else query_params
    # Cloudflare's legacy auth scheme: email + global API key headers.
    headers = {
        'X-Auth-Email': self._get_provider_option('auth_username'),
        'X-Auth-Key': self._get_provider_option('auth_token'),
        'Content-Type': 'application/json',
    }
    response = requests.request(action, self.api_endpoint + url,
                                params=params,
                                data=json.dumps(body),
                                headers=headers)
    # if the request fails for any reason, throw an error.
    response.raise_for_status()
    return response.json()
<|code_end|>
|
Easyname not working anymore
Lexicon version: 3.3.27
I can't access anything on easyname right now:
```
Traceback (most recent call last):
File "/usr/local/bin/lexicon", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/dist-packages/lexicon/cli.py", line 117, in main
results = client.execute()
File "/usr/local/lib/python3.6/dist-packages/lexicon/client.py", line 87, in execute
return self.provider.list_records(record_type, name, content)
File "/usr/local/lib/python3.6/dist-packages/lexicon/providers/base.py", line 94, in list_records
return self._list_records(rtype=rtype, name=name, content=content)
File "/usr/local/lib/python3.6/dist-packages/lexicon/providers/easyname.py", line 190, in _list_records
return self._list_records_internal(rtype=rtype, name=name, content=content)
File "/usr/local/lib/python3.6/dist-packages/lexicon/providers/easyname.py", line 217, in _list_records_internal
rows = self._get_dns_entry_trs()
File "/usr/local/lib/python3.6/dist-packages/lexicon/providers/easyname.py", line 321, in _get_dns_entry_trs
'Could not load DNS entries.'
AssertionError: Could not load DNS entries.
```
Credentials are correct. The account is with easyname.at.
| lexicon/providers/easyname.py
<|code_start|>"""Module provider for Easyname DNS"""
from __future__ import absolute_import, print_function
import logging
from requests import Response, Session
# Handling optional dependency for this provider
try:
from bs4 import BeautifulSoup, Tag
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['easyname.eu']
def provider_parser(subparser):
    """Configure provider parser for Easyname DNS"""
    subparser.description = """A provider for Easyname DNS."""
    # Both options carry plain credential strings for the easyname web login.
    for flag, help_text in (
            ('--auth-username', 'Specify username used to authenticate'),
            ('--auth-password', 'Specify password used to authenticate'),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """
    easyname provider
    """
    # Easyname exposes no public API; this provider scrapes the customer
    # web UI. These are the UI endpoints used for login and DNS management.
    URLS = {
        'login': 'https://my.easyname.com/en/login',
        'domain_list': 'https://my.easyname.com/domains/',
        'overview': 'https://my.easyname.com/hosting/view-user.php',
        'dns': 'https://my.easyname.com/domains/settings/dns.php?domain={}',
        'dns_create_entry': 'https://my.easyname.com/domains/settings/form.php?domain={}',
        'dns_delete_entry':
            'https://my.easyname.com/domains/settings/delete_record.php?domain={}&confirm=1&id={}'
    }

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.session = Session()  # keeps the login cookie between requests
        self.domain_id = None  # easyname-internal id of the managed zone
        self._records = None  # cached parsed DNS entries; None = not fetched yet
def _authenticate(self):
    """
    Authenticates against Easyname website and try to find out the domain
    id.
    Easyname uses a CSRF token in its login form, so two requests are
    necessary to actually login.
    Returns:
        bool: True if domain id was found.
    Raises:
        AssertionError: When a request returns unexpected or unknown data.
        ValueError: When login data is wrong or the domain does not exist.
    """
    # Step 1: fetch the CSRF token from the login form; step 2: post credentials.
    csrf_token = self._get_csrf_token()
    self._login(csrf_token)
    # The zone id is scraped from the logged-in domain overview page.
    domain_text_element = self._get_domain_text_of_authoritative_zone()
    self.domain_id = self._get_domain_id(domain_text_element)
    LOGGER.debug('Easyname domain ID: %s', self.domain_id)
    return True
def _create_record(self, rtype, name, content):
    """Public create entry point; delegates to the internal implementation."""
    return self._create_record_internal(rtype, name, content)
def _create_record_internal(self, rtype, name, content, identifier=None):
    """
    Create a new DNS entry in the domain zone if it does not already exist.
    Args:
        rtype (str): The DNS type (e.g. A, TXT, MX, etc) of the new entry.
        name (str): The name of the new DNS entry, e.g the domain for which a
            MX entry shall be valid.
        content (str): The content of the new DNS entry, e.g. the mail server
            hostname for a MX entry.
        [identifier] (str): The easyname id of a DNS entry. Use to overwrite an
            existing entry.
    Returns:
        bool: True if the record was created successfully, False otherwise.
    """
    name = self._relative_name(name) if name is not None else name
    LOGGER.debug('Creating record with name %s', name)
    # NOOP when an identical record already exists.
    if self._is_duplicate_record(rtype, name, content):
        return True
    data = self._get_post_data_to_create_dns_entry(rtype, name, content, identifier)
    LOGGER.debug('Create DNS data: %s', data)
    create_response = self.session.post(
        self.URLS['dns_create_entry'].format(self.domain_id),
        data=data
    )
    self._invalidate_records_cache()
    self._log('Create DNS entry', create_response)
    # The web UI gives no structured result, so success is verified by
    # re-listing the zone and checking that the record shows up.
    was_success = len(self._list_records(rtype, name, content)) > 0
    if was_success:
        msg = 'Successfully added record %s'
    else:
        msg = 'Failed to add record %s'
    LOGGER.info(msg, name)
    return was_success
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """
    Delete one or more DNS entries in the domain zone that match the given
    criteria.
    Args:
        [identifier] (str): An ID to match against DNS entry easyname IDs.
        [rtype] (str): A DNS rtype (e.g. A, TXT, MX, etc) to match against DNS
            entry types.
        [name] (str): A name to match against DNS entry names.
        [content] (str): A content to match against a DNS entry contents.
    Returns:
        bool: True if the record(s) were deleted successfully, False
        otherwise.
    """
    # A successful deletion redirects back to the zone's DNS overview page.
    success_url = self.URLS['dns'].format(self.domain_id)
    record_ids = self._get_matching_dns_entry_ids(identifier, rtype,
                                                  name, content)
    LOGGER.debug('Record IDs to delete: %s', record_ids)
    success = True
    for rec_id in record_ids:
        delete_response = self.session.get(
            self.URLS['dns_delete_entry'].format(self.domain_id, rec_id))
        self._invalidate_records_cache()
        self._log('Delete DNS entry {}'.format(rec_id), delete_response)
        # Every requested deletion must succeed for the call to return True.
        success = success and delete_response.url == success_url
    return success
def _update_record(self, identifier, rtype=None, name=None, content=None):
    """
    Update a DNS entry identified by identifier or name in the domain zone.
    Any non given argument will leave the current value of the DNS entry.
    Args:
        identifier (str): The easyname id of the DNS entry to update.
        [rtype] (str): The DNS rtype (e.g. A, TXT, MX, etc) of the new entry.
        [name] (str): The name of the new DNS entry, e.g the domain for which
            a MX entry shall be valid.
        [content] (str): The content of the new DNS entry, e.g. the mail
            server hostname for a MX entry.
    Returns:
        bool: True if the record was updated successfully, False otherwise.
    Raises:
        AssertionError: When a request returns unexpected or unknown data.
    """
    if identifier is not None:
        # Parsed record ids are ints; normalize so filtering by id matches.
        identifier = int(identifier)
        records = self._list_records_internal(identifier=identifier)
    else:
        records = self._list_records_internal(name=name, rtype=rtype)
    LOGGER.debug('Records to update (%d): %s', len(records), records)
    assert records, 'No record found to update'
    success = True
    for record in records:
        # Missing arguments fall back to the record's current values; the
        # update itself is a create with the existing entry's id (overwrite).
        name = name if name is not None else record['name']
        rtype = rtype if rtype is not None else record['type']
        content = content if content is not None \
            else record['content']
        success = success and self._create_record_internal(
            rtype, name, content, record['id'])
    return success
def _list_records(self, rtype=None, name=None, content=None):
    """Public list entry point; delegates to the internal implementation."""
    return self._list_records_internal(rtype, name, content)
def _list_records_internal(self, rtype=None, name=None, content=None, identifier=None):
    """
    Filter and list DNS entries of domain zone on Easyname.
    Easyname shows each entry in a HTML table row and each attribute on a
    table column.
    Args:
        [rtype] (str): Filter by DNS rtype (e.g. A, TXT, MX, etc)
        [name] (str): Filter by the name of the DNS entry, e.g the domain for
            which a MX entry shall be valid.
        [content] (str): Filter by the content of the DNS entry, e.g. the
            mail server hostname for a MX entry.
        [identifier] (str): Filter by the easyname id of the DNS entry.
    Returns:
        list: A list of DNS entries. A DNS entry is an object with DNS
            attribute names as keys (e.g. name, content, priority, etc)
            and additionally an id.
    Raises:
        AssertionError: When a request returns unexpected or unknown data.
    """
    name = self._full_name(name) if name is not None else name
    # Scrape only once per provider instance; _invalidate_records_cache
    # resets self._records to force a re-fetch after mutations.
    if self._records is None:
        records = []
        rows = self._get_dns_entry_trs()
        for index, row in enumerate(rows):
            self._log('DNS list entry', row)
            try:
                rec = {}
                if row.has_attr('ondblclick'):
                    # The real easyname id is embedded in the row's
                    # ondblclick handler as "...id=<n>'...".
                    rec['id'] = int(row['ondblclick'].split(
                        'id=')[1].split("'")[0])
                else:
                    # Rows without a handler get a synthetic non-positive id.
                    rec['id'] = -index
                # Column order in the UI table: name, type, content,
                # priority, ttl.
                columns = row.find_all('td')
                rec['name'] = (columns[0].string or '').strip()
                rec['type'] = (columns[1].contents[1] or '').strip()
                rec['content'] = (columns[2].string or '').strip()
                rec['priority'] = (columns[3].string or '').strip()
                rec['ttl'] = (columns[4].string or '').strip()
                if rec['priority']:
                    rec['priority'] = int(rec['priority'])
                if rec['ttl']:
                    rec['ttl'] = int(rec['ttl'])
            except Exception as error:
                errmsg = 'Cannot parse DNS entry ({}).'.format(error)
                LOGGER.warning(errmsg)
                raise AssertionError(errmsg)
            records.append(rec)
        self._records = records
    records = self._filter_records(self._records, rtype, name, content, identifier)
    LOGGER.debug('Final records (%d): %s', len(records), records)
    return records
def _request(self, action='GET', url='/', data=None, query_params=None):
    # Required by BaseProvider's abstract interface; intentionally a no-op
    # because this provider talks to the easyname web UI via self.session.
    pass
def _invalidate_records_cache(self):
    """Drop the cached DNS entries so the next listing re-scrapes them."""
    self._records = None
def _get_post_data_to_create_dns_entry(self, rtype, name, content, identifier=None):
    """
    Build and return the post date that is needed to create a DNS entry.
    """
    # When an identifier is given the form performs an overwrite of that
    # entry, so its current priority/ttl must be carried over.
    is_update = identifier is not None
    record = None
    if is_update:
        records = self._list_records_internal(identifier=identifier)
        assert len(records) == 1, 'ID is not unique or does not exist'
        record = records[0]
        LOGGER.debug('Create post data to update record: %s', record)
    data = {
        'id': str(identifier) if is_update else '',
        'action': 'save',
        'name': name,
        'type': rtype,
        'content': content,
        # Form defaults for new entries: priority 10, ttl 360.
        'prio': str(record['priority']) if is_update else '10',
        'ttl': str(record['ttl']) if is_update else '360',
        'commit': ''
    }
    # CLI-provided options override the defaults; easyname's minimum ttl is 360.
    ttl = self._get_lexicon_option('ttl')
    if ttl and ttl > 360:
        data['ttl'] = str(ttl)
    prio = self._get_lexicon_option('priority')
    if prio and prio > 0:
        data['prio'] = str(prio)
    return data
def _is_duplicate_record(self, rtype, name, content):
    """Return True when an identical DNS entry already exists in the zone."""
    duplicate = len(self._list_records(rtype, name, content)) >= 1
    if duplicate:
        LOGGER.info('Duplicate record %s %s %s, NOOP', rtype, name, content)
    return duplicate
def _get_matching_dns_entry_ids(self, identifier=None, rtype=None,
                                name=None, content=None):
    """Return the ids of DNS entries matching either the id or the filters."""
    if identifier:
        return [identifier]
    return [record['id']
            for record in self._list_records(rtype, name, content)]
def _get_dns_entry_trs(self):
    """
    Return the TR elements holding the DNS entries.
    """
    dns_list_response = self.session.get(
        self.URLS['dns'].format(self.domain_id))
    self._log('DNS list', dns_list_response)
    assert dns_list_response.status_code == 200, \
        'Could not load DNS entries.'
    html = BeautifulSoup(dns_list_response.content, 'html.parser')
    self._log('DNS list', html)
    dns_table = html.find('table', {'id': 'cp_domains_dnseintraege'})
    assert dns_table is not None, 'Could not find DNS entry table'

    # Zone rows are the <tr> elements that carry either a class attribute
    # or an ondblclick handler (the handler holds the entry's easyname id).
    def _is_zone_tr(elm):
        has_ondblclick = elm.has_attr('ondblclick')
        has_class = elm.has_attr('class')
        return elm.name.lower() == 'tr' and (has_class or has_ondblclick)
    rows = dns_table.findAll(_is_zone_tr)
    assert rows is not None and rows, 'Could not find any DNS entries'
    return rows
def _filter_records(self, records, rtype=None, name=None, content=None, identifier=None):  # pylint: disable=too-many-arguments,no-self-use
    """
    Filter dns entries based on type, name or content.
    """
    if not records:
        return []
    matches = records
    if identifier is not None:
        LOGGER.debug('Filtering %d records by id: %s', len(matches), identifier)
        matches = [rec for rec in matches if rec['id'] == identifier]
    if rtype is not None:
        LOGGER.debug('Filtering %d records by type: %s', len(matches), rtype)
        matches = [rec for rec in matches if rec['type'] == rtype]
    if name is not None:
        LOGGER.debug('Filtering %d records by name: %s', len(matches), name)
        # A trailing dot denotes an absolute name; strip it for comparison.
        wanted = name[:-1] if name.endswith('.') else name
        matches = [rec for rec in matches if rec['name'] == wanted]
    if content is not None:
        LOGGER.debug('Filtering %d records by content: %s', len(matches), content.lower())
        matches = [rec for rec in matches
                   if rec['content'].lower() == content.lower()]
    return matches
def _get_csrf_token(self):
    """Return the CSRF Token of easyname login form."""
    home_response = self.session.get(self.URLS['login'])
    self._log('Home', home_response)
    assert home_response.status_code == 200, \
        'Could not load Easyname login page.'
    html = BeautifulSoup(home_response.content, 'html.parser')
    self._log('Home', html)
    # The token lives in a hidden input with id "loginxtoken".
    csrf_token_field = html.find('input', {'id': 'loginxtoken'})
    assert csrf_token_field is not None, 'Could not find login token.'
    return csrf_token_field['value']
def _login(self, csrf_token):
    """Attempt to login session on easyname."""
    login_response = self.session.post(
        self.URLS['login'],
        data={
            'username': self._get_provider_option('auth_username') or '',
            'password': self._get_provider_option('auth_password') or '',
            'submit': '',
            'loginxtoken': csrf_token,
        }
    )
    self._log('Login', login_response)
    assert login_response.status_code == 200, \
        'Could not login due to a network error.'
    # A successful login redirects to the domain list; any other landing
    # page means the credentials were rejected.
    assert login_response.url == self.URLS['domain_list'], \
        'Easyname login failed, bad EASYNAME_USER or EASYNAME_PASS.%s' % login_response.url
def _get_domain_text_of_authoritative_zone(self):
    """Get the authoritative name zone."""
    # We are logged in, so get the domain list
    zones_response = self.session.get(self.URLS['domain_list'])
    self._log('Zone', zones_response)
    assert zones_response.status_code == 200, \
        'Could not retrieve domain list due to a network error.'
    html = BeautifulSoup(zones_response.content, 'html.parser')
    self._log('Zone', html)
    domain_table = html.find('table', {'id': 'cp_domain_table'})
    assert domain_table is not None, 'Could not find domain table'
    # (Sub)domains can either be managed in their own zones or by the
    # zones of their parent (sub)domains. Iterate over all subdomains
    # (starting with the deepest one) and see if there is an own zone
    # for it.
    domain = self.domain or ''
    domain_text = None
    subdomains = domain.split('.')
    while True:
        domain = '.'.join(subdomains)
        LOGGER.debug('Check if %s has own zone', domain)
        domain_text = domain_table.find(string=domain)
        # Stop at a match, or once only a bare second-level domain remains.
        if domain_text is not None or len(subdomains) < 3:
            break
        subdomains.pop(0)
    # Update domain to equal the zone's domain. This is important if we are
    # handling a subdomain that has no zone of itself. If we do not do
    # this, self._relative_name will strip also a part of the subdomain
    # away.
    self.domain = domain
    assert domain_text is not None, \
        'The domain does not exist on Easyname.'
    return domain_text
def _get_domain_id(self, domain_text_element):  # pylint: disable=no-self-use
    """Return the easyname id of the domain.

    Args:
        domain_text_element: The BeautifulSoup text node holding the
            domain name inside the domain overview table.
    Returns:
        str: The easyname-internal domain id (last path segment of the
        settings link in the row's second cell).
    Raises:
        AssertionError: When the surrounding HTML does not have the
            expected structure.
    """
    try:
        # Hierarchy: TR > TD > SPAN > Domain Text
        tr_anchor = domain_text_element.parent.parent.parent
        td_anchor = tr_anchor.find('td', {'class': 'td_2'})
        link = td_anchor.find('a')['href']
        domain_id = link.rsplit('/', 1)[-1]
        return domain_id
    except Exception as error:
        # Bug fix: the original assigned a (format, arg) tuple to errmsg,
        # so the logged and raised message was a raw tuple with an
        # unexpanded %s. Format the string properly instead.
        errmsg = ('Cannot get the domain id even though the domain seems '
                  'to exist (%s).' % error)
        LOGGER.warning(errmsg)
        raise AssertionError(errmsg)
def _log(self, name, element):  # pylint: disable=no-self-use
    """
    Log Response and Tag elements. Do nothing if elements is none of them.
    """
    if isinstance(element, Response):
        LOGGER.debug('%s response: URL=%s Code=%s', name, element.url, element.status_code)
    elif isinstance(element, (BeautifulSoup, Tag)):
        # Full HTML dump; useful when scraping breaks after a UI change.
        LOGGER.debug('%s HTML:\n%s', name, element)
<|code_end|>
| lexicon/providers/easyname.py
<|code_start|>"""Module provider for Easyname DNS"""
from __future__ import absolute_import, print_function
import logging
from requests import Response, Session
# Handling optional dependency for this provider
try:
from bs4 import BeautifulSoup, Tag
except ImportError:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ['easyname.eu']
def provider_parser(subparser):
    """Configure provider parser for Easyname DNS"""
    subparser.description = """A provider for Easyname DNS."""
    # Both options carry plain credential strings for the easyname web login.
    for flag, help_text in (
            ('--auth-username', 'Specify username used to authenticate'),
            ('--auth-password', 'Specify password used to authenticate'),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """
    easyname provider
    """
    # Easyname exposes no public API; this provider scrapes the customer
    # web UI. Deletion is a two-step flow: a delete page plus a confirm URL.
    URLS = {
        'login': 'https://my.easyname.com/en/login',
        'domain_list': 'https://my.easyname.com/domains/',
        'overview': 'https://my.easyname.com/hosting/view-user.php',
        'dns': 'https://my.easyname.com/en/domain/dns/index/domain/{}/',
        'dns_create_entry': 'https://my.easyname.com/en/domain/dns/create/domain/{}',
        'dns_delete_entry':
            'https://my.easyname.com/en/domain/dns/delete/domain/{}/id/{}',
        'dns_delete_entry_confirm':
            'https://my.easyname.com/en/domain/dns/delete/domain/{}/id/{}/confirm/1',
    }

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.session = Session()  # keeps the login cookie between requests
        self.domain_id = None  # easyname-internal id of the managed zone
        self._records = None  # cached parsed DNS entries; None = not fetched yet
def _authenticate(self):
    """
    Authenticates against Easyname website and try to find out the domain
    id.
    Easyname uses a CSRF token in its login form, so two requests are
    necessary to actually login.
    Returns:
        bool: True if domain id was found.
    Raises:
        AssertionError: When a request returns unexpected or unknown data.
        ValueError: When login data is wrong or the domain does not exist.
    """
    # Step 1: fetch the CSRF token from the login form; step 2: post credentials.
    csrf_token = self._get_csrf_token()
    self._login(csrf_token)
    # The zone id is scraped from the logged-in domain overview page.
    domain_text_element = self._get_domain_text_of_authoritative_zone()
    self.domain_id = self._get_domain_id(domain_text_element)
    LOGGER.debug('Easyname domain ID: %s', self.domain_id)
    return True
def _create_record(self, rtype, name, content):
    """Public create entry point; delegates to the internal implementation."""
    return self._create_record_internal(rtype, name, content)
def _create_record_internal(self, rtype, name, content, identifier=None):
    """
    Create a new DNS entry in the domain zone if it does not already exist.
    Args:
        rtype (str): The DNS type (e.g. A, TXT, MX, etc) of the new entry.
        name (str): The name of the new DNS entry, e.g the domain for which a
            MX entry shall be valid.
        content (str): The content of the new DNS entry, e.g. the mail server
            hostname for a MX entry.
        [identifier] (str): The easyname id of a DNS entry. Use to overwrite an
            existing entry.
    Returns:
        bool: True if the record was created successfully, False otherwise.
    """
    name = self._relative_name(name) if name is not None else name
    LOGGER.debug('Creating record with name %s', name)
    # NOOP when an identical record already exists.
    if self._is_duplicate_record(rtype, name, content):
        return True
    data = self._get_post_data_to_create_dns_entry(rtype, name, content, identifier)
    LOGGER.debug('Create DNS data: %s', data)
    create_response = self.session.post(
        self.URLS['dns_create_entry'].format(self.domain_id),
        data=data
    )
    self._invalidate_records_cache()
    self._log('Create DNS entry', create_response)
    # The web UI gives no structured result, so success is verified by
    # re-listing the zone and checking that the record shows up.
    was_success = len(self._list_records(rtype, name, content)) > 0
    if was_success:
        msg = 'Successfully added record %s'
    else:
        msg = 'Failed to add record %s'
    LOGGER.info(msg, name)
    return was_success
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """
    Delete one or more DNS entries in the domain zone that match the given
    criteria.
    Args:
        [identifier] (str): An ID to match against DNS entry easyname IDs.
        [rtype] (str): A DNS rtype (e.g. A, TXT, MX, etc) to match against DNS
            entry types.
        [name] (str): A name to match against DNS entry names.
        [content] (str): A content to match against a DNS entry contents.
    Returns:
        bool: True if the record(s) were deleted successfully, False
        otherwise.
    """
    record_ids = self._get_matching_dns_entry_ids(identifier, rtype,
                                                  name, content)
    LOGGER.debug('Record IDs to delete: %s', record_ids)
    success = True
    for rec_id in record_ids:
        # Deletion is a two-step UI flow: request the delete page, then
        # post the confirmation URL; only the confirmation reports success.
        delete_response = self.session.get(
            self.URLS['dns_delete_entry'].format(self.domain_id, rec_id))
        delete_response_confirm = self.session.post(
            self.URLS['dns_delete_entry_confirm'].format(self.domain_id, rec_id))
        self._invalidate_records_cache()
        self._log('Delete DNS entry {}'.format(rec_id), delete_response)
        # Bug fix: accumulate the outcome across all deletions instead of
        # overwriting it each iteration, so a single failed deletion makes
        # the whole call return False (matches the documented contract).
        success = success and (
            'feedback-message--success' in delete_response_confirm.text)
    return success
def _update_record(self, identifier, rtype=None, name=None, content=None):
    """
    Update a DNS entry identified by identifier or name in the domain zone.
    Any non given argument will leave the current value of the DNS entry.
    Args:
        identifier (str): The easyname id of the DNS entry to update.
        [rtype] (str): The DNS rtype (e.g. A, TXT, MX, etc) of the new entry.
        [name] (str): The name of the new DNS entry, e.g the domain for which
            a MX entry shall be valid.
        [content] (str): The content of the new DNS entry, e.g. the mail
            server hostname for a MX entry.
    Returns:
        bool: True if the record was updated successfully, False otherwise.
    Raises:
        AssertionError: When a request returns unexpected or unknown data.
    """
    if identifier is not None:
        # Parsed record ids are ints; normalize so filtering by id matches.
        identifier = int(identifier)
        records = self._list_records_internal(identifier=identifier)
    else:
        records = self._list_records_internal(name=name, rtype=rtype)
    LOGGER.debug('Records to update (%d): %s', len(records), records)
    assert records, 'No record found to update'
    success = True
    for record in records:
        # Missing arguments fall back to the record's current values; the
        # update itself is a create with the existing entry's id (overwrite).
        name = name if name is not None else record['name']
        rtype = rtype if rtype is not None else record['type']
        content = content if content is not None \
            else record['content']
        success = success and self._create_record_internal(
            rtype, name, content, record['id'])
    return success
def _list_records(self, rtype=None, name=None, content=None):
    """Public list entry point; delegates to the internal implementation."""
    return self._list_records_internal(rtype, name, content)
def _list_records_internal(self, rtype=None, name=None, content=None, identifier=None):
    """
    Filter and list DNS entries of domain zone on Easyname.
    Easyname shows each entry in a HTML table row and each attribute on a
    table column.
    Args:
        [rtype] (str): Filter by DNS rtype (e.g. A, TXT, MX, etc)
        [name] (str): Filter by the name of the DNS entry, e.g the domain for
            which a MX entry shall be valid.
        [content] (str): Filter by the content of the DNS entry, e.g. the
            mail server hostname for a MX entry.
        [identifier] (str): Filter by the easyname id of the DNS entry.
    Returns:
        list: A list of DNS entries. A DNS entry is an object with DNS
            attribute names as keys (e.g. name, content, priority, etc)
            and additionally an id.
    Raises:
        AssertionError: When a request returns unexpected or unknown data.
    """
    name = self._full_name(name) if name is not None else name
    # Scrape only once per provider instance; _invalidate_records_cache
    # resets self._records to force a re-fetch after mutations.
    if self._records is None:
        records = []
        # skip the first record which contains the table header
        rows = self._get_dns_entry_trs()[1:]
        for row in rows:
            self._log('DNS list entry', row)
            try:
                rec = {}
                # Column order in the UI table: name, type, content,
                # priority, ttl, actions (edit/delete links).
                columns = row.find_all('td')
                rec['name'] = (columns[0].string or '').strip()
                rec['type'] = (columns[1].contents[1] or '').strip()
                rec['content'] = (columns[2].string or '').strip()
                rec['priority'] = (columns[3].string or '').strip()
                rec['ttl'] = (columns[4].string or '').strip()
                # The entry's easyname id is the last path segment of the
                # action link in the sixth column.
                rec['id'] = int(columns[5].find("a")["href"].rsplit('/', 1)[-1])
                if rec['priority']:
                    rec['priority'] = int(rec['priority'])
                if rec['ttl']:
                    rec['ttl'] = int(rec['ttl'])
            except Exception as error:
                errmsg = 'Cannot parse DNS entry ({}).'.format(error)
                LOGGER.warning(errmsg)
                raise AssertionError(errmsg)
            records.append(rec)
        self._records = records
    records = self._filter_records(self._records, rtype, name, content, identifier)
    LOGGER.debug('Final records (%d): %s', len(records), records)
    return records
def _request(self, action='GET', url='/', data=None, query_params=None):
    # Required by BaseProvider's abstract interface; intentionally a no-op
    # because this provider talks to the easyname web UI via self.session.
    pass
def _invalidate_records_cache(self):
    """Drop the cached DNS entries so the next listing re-scrapes them."""
    self._records = None
def _get_post_data_to_create_dns_entry(self, rtype, name, content, identifier=None):
    """
    Build and return the post date that is needed to create a DNS entry.
    """
    # When an identifier is given the form performs an overwrite of that
    # entry, so its current priority/ttl must be carried over.
    is_update = identifier is not None
    record = None
    if is_update:
        records = self._list_records_internal(identifier=identifier)
        assert len(records) == 1, 'ID is not unique or does not exist'
        record = records[0]
        LOGGER.debug('Create post data to update record: %s', record)
    data = {
        'id': str(identifier) if is_update else '',
        'name': name,
        'type': rtype,
        'content': content,
        # Form defaults for new entries: priority 10, ttl 360.
        'priority': str(record['priority']) if is_update else '10',
        'ttl': str(record['ttl']) if is_update else '360',
    }
    # CLI-provided options override the defaults; easyname's minimum ttl is 360.
    ttl = self._get_lexicon_option('ttl')
    if ttl and ttl > 360:
        data['ttl'] = str(ttl)
    prio = self._get_lexicon_option('priority')
    if prio and prio > 0:
        data['priority'] = str(prio)
    return data
def _is_duplicate_record(self, rtype, name, content):
    """Return True when an identical DNS entry already exists in the zone."""
    duplicate = len(self._list_records(rtype, name, content)) >= 1
    if duplicate:
        LOGGER.info('Duplicate record %s %s %s, NOOP', rtype, name, content)
    return duplicate
def _get_matching_dns_entry_ids(self, identifier=None, rtype=None,
                                name=None, content=None):
    """Return the ids of DNS entries matching either the id or the filters."""
    if identifier:
        return [identifier]
    return [record['id']
            for record in self._list_records(rtype, name, content)]
def _get_dns_entry_trs(self):
    """
    Return the TR elements holding the DNS entries.
    """
    dns_list_response = self.session.get(
        self.URLS['dns'].format(self.domain_id))
    self._log('DNS list', dns_list_response)
    assert dns_list_response.status_code == 200, \
        'Could not load DNS entries.'
    html = BeautifulSoup(dns_list_response.content, 'html.parser')
    self._log('DNS list', html)
    dns_table = html.find('table', {'id': 'cp_domains_dnseintraege'})
    assert dns_table is not None, 'Could not find DNS entry table'

    # In the current UI, DNS entry rows are the <tr> elements WITHOUT a
    # class attribute (styled rows are decoration, not entries).
    def _is_zone_tr(elm):
        return elm.name.lower() == 'tr' and (not elm.has_attr('class'))
    rows = dns_table.findAll(_is_zone_tr)
    assert rows is not None and rows, 'Could not find any DNS entries'
    return rows
def _filter_records(self, records, rtype=None, name=None, content=None, identifier=None):  # pylint: disable=too-many-arguments,no-self-use
    """
    Filter dns entries based on type, name or content.
    """
    if not records:
        return []
    matches = records
    if identifier is not None:
        LOGGER.debug('Filtering %d records by id: %s', len(matches), identifier)
        matches = [rec for rec in matches if rec['id'] == identifier]
    if rtype is not None:
        LOGGER.debug('Filtering %d records by type: %s', len(matches), rtype)
        matches = [rec for rec in matches if rec['type'] == rtype]
    if name is not None:
        LOGGER.debug('Filtering %d records by name: %s', len(matches), name)
        # A trailing dot denotes an absolute name; strip it for comparison.
        wanted = name[:-1] if name.endswith('.') else name
        matches = [rec for rec in matches if rec['name'] == wanted]
    if content is not None:
        LOGGER.debug('Filtering %d records by content: %s', len(matches), content.lower())
        matches = [rec for rec in matches
                   if rec['content'].lower() == content.lower()]
    return matches
def _get_csrf_token(self):
    """Return the CSRF Token of easyname login form."""
    home_response = self.session.get(self.URLS['login'])
    self._log('Home', home_response)
    assert home_response.status_code == 200, \
        'Could not load Easyname login page.'
    html = BeautifulSoup(home_response.content, 'html.parser')
    self._log('Home', html)
    # The token lives in a hidden input with id "loginxtoken".
    csrf_token_field = html.find('input', {'id': 'loginxtoken'})
    assert csrf_token_field is not None, 'Could not find login token.'
    return csrf_token_field['value']
def _login(self, csrf_token):
    """Attempt to login session on easyname."""
    login_response = self.session.post(
        self.URLS['login'],
        data={
            'username': self._get_provider_option('auth_username') or '',
            'password': self._get_provider_option('auth_password') or '',
            'submit': '',
            'loginxtoken': csrf_token,
        }
    )
    self._log('Login', login_response)
    assert login_response.status_code == 200, \
        'Could not login due to a network error.'
    # A successful login redirects to the domain list; any other landing
    # page means the credentials were rejected.
    assert login_response.url == self.URLS['domain_list'], \
        'Easyname login failed, bad EASYNAME_USER or EASYNAME_PASS.%s' % login_response.url
def _get_domain_text_of_authoritative_zone(self):
    """Get the authoritative name zone."""
    # We are logged in, so get the domain list
    zones_response = self.session.get(self.URLS['domain_list'])
    self._log('Zone', zones_response)
    assert zones_response.status_code == 200, \
        'Could not retrieve domain list due to a network error.'
    html = BeautifulSoup(zones_response.content, 'html.parser')
    self._log('Zone', html)
    domain_table = html.find('table', {'id': 'cp_domain_table'})
    assert domain_table is not None, 'Could not find domain table'
    # (Sub)domains can either be managed in their own zones or by the
    # zones of their parent (sub)domains. Iterate over all subdomains
    # (starting with the deepest one) and see if there is an own zone
    # for it.
    domain = self.domain or ''
    domain_text = None
    subdomains = domain.split('.')
    while True:
        domain = '.'.join(subdomains)
        LOGGER.debug('Check if %s has own zone', domain)
        domain_text = domain_table.find(string=domain)
        # Stop at a match, or once only a bare second-level domain remains.
        if domain_text is not None or len(subdomains) < 3:
            break
        subdomains.pop(0)
    # Update domain to equal the zone's domain. This is important if we are
    # handling a subdomain that has no zone of itself. If we do not do
    # this, self._relative_name will strip also a part of the subdomain
    # away.
    self.domain = domain
    assert domain_text is not None, \
        'The domain does not exist on Easyname.'
    return domain_text
def _get_domain_id(self, domain_text_element):  # pylint: disable=no-self-use
    """Return the easyname id of the domain.

    Args:
        domain_text_element: The BeautifulSoup text node holding the
            domain name inside the domain overview table.
    Returns:
        str: The easyname-internal domain id (last path segment of the
        settings link in the row's second cell).
    Raises:
        AssertionError: When the surrounding HTML does not have the
            expected structure.
    """
    try:
        # Hierarchy: TR > TD > SPAN > Domain Text
        tr_anchor = domain_text_element.parent.parent.parent
        td_anchor = tr_anchor.find('td', {'class': 'td_2'})
        link = td_anchor.find('a')['href']
        domain_id = link.rsplit('/', 1)[-1]
        return domain_id
    except Exception as error:
        # Bug fix: the original assigned a (format, arg) tuple to errmsg,
        # so the logged and raised message was a raw tuple with an
        # unexpanded %s. Format the string properly instead.
        errmsg = ('Cannot get the domain id even though the domain seems '
                  'to exist (%s).' % error)
        LOGGER.warning(errmsg)
        raise AssertionError(errmsg)
def _log(self, name, element):  # pylint: disable=no-self-use
    """Debug-log a requests Response or a BeautifulSoup/Tag element.

    Any other value is silently ignored.
    """
    if isinstance(element, (BeautifulSoup, Tag)):
        LOGGER.debug('%s HTML:\n%s', name, element)
    elif isinstance(element, Response):
        LOGGER.debug('%s response: URL=%s Code=%s', name, element.url, element.status_code)
<|code_end|>
|
Route53 provider does not handle >100 hosted zones
Hello,
I ran into an interesting problem while setting up a system with certbot + lexicon. We have almost 200 hosted zones in AWS Route 53 and we were getting a "No domains found." error which I finally traced to the _authenticate method. I hacked together some changes to this method so it makes repeated calls to get all the hosted zones before searching for the one for which the hosted zone id is needed. Based on the Boto3 docs it looks like even though this method has a MaxItems parameter, the absolute max is 100.
I don't think this is the best-written change, so I am just including it here for someone to perhaps incorporate into a future update.
Thanks.
```
def _authenticate(self):
"""Determine the hosted zone id for the domain."""
try:
is_truncated=True
next_dns_name=''
next_hz_id=''
hosted_zones=[]
while(is_truncated):
if(next_dns_name==''):
hosted_zones_dict = self.r53_client.list_hosted_zones_by_name()
else:
hosted_zones_dict = self.r53_client.list_hosted_zones_by_name(DNSName=next_dns_name,HostedZoneId=next_hz_id)
hosted_zones += hosted_zones_dict[
'HostedZones'
]
is_truncated = hosted_zones_dict[
'IsTruncated'
]
if(is_truncated):
next_dns_name = hosted_zones_dict[
'NextDNSName'
]
next_hz_id = hosted_zones_dict[
'NextHostedZoneId'
]
hosted_zone = next(
hz for hz in hosted_zones
if self.filter_zone(hz)
)
self.domain_id = hosted_zone['Id']
except StopIteration:
raise Exception('No domain found')
```
| lexicon/providers/route53.py
<|code_start|>"""Provide support to Lexicon for AWS Route 53 DNS changes."""
from __future__ import absolute_import
import hashlib
import logging
import re
from lexicon.providers.base import Provider as BaseProvider
try:
import boto3 # optional dep
import botocore # optional dep
except ImportError:
pass
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = [re.compile(r"^awsdns-\d+\.\w+$")]
def provider_parser(subparser):
    """Register the AWS Route 53 command-line options on *subparser*."""
    option_specs = [
        ("--auth-access-key", "specify ACCESS_KEY for authentication"),
        ("--auth-access-secret", "specify ACCESS_SECRET for authentication"),
        (
            "--private-zone",
            (
                "indicates what kind of hosted zone to use. If true, use "
                "only private zones. If false, use only public zones"
            ),
        ),
        # TODO: these are only required for testing, we should figure out
        # a way to remove them & update the integration tests
        # to dynamically populate the auth credentials that are required.
        ("--auth-username", "alternative way to specify the ACCESS_KEY for authentication"),
        ("--auth-token", "alternative way to specify the ACCESS_SECRET for authentication"),
    ]
    for flag, help_text in option_specs:
        subparser.add_argument(flag, help=help_text)
class RecordSetPaginator(object):
    """Iterate over every record set of a hosted zone, page by page."""

    def __init__(self, r53_client, hosted_zone_id, max_items=None):
        """Store the client and query parameters for later page fetches."""
        self.r53_client = r53_client
        self.hosted_zone_id = hosted_zone_id
        self.max_items = max_items

    def get_record_sets(self, **kwargs):
        """Fetch a single page of record sets from the Route 53 API."""
        return self.r53_client.list_resource_record_sets(**kwargs)

    def get_base_kwargs(self):
        """Build the keyword arguments shared by every page request."""
        base = {"HostedZoneId": self.hosted_zone_id}
        if self.max_items is not None:
            base.update({"MaxItems": str(self.max_items)})
        return base

    def all_record_sets(self):
        """Yield record sets, requesting the next page while one exists."""
        kwargs = self.get_base_kwargs()
        next_name = None
        next_type = None
        while True:
            if next_name is not None:
                kwargs.update(
                    {
                        "StartRecordName": next_name,
                        "StartRecordType": next_type,
                    }
                )
            page = self.get_record_sets(**kwargs)
            for record_set in page.get("ResourceRecordSets", []):
                yield record_set
            if not page.get("IsTruncated", False):
                return
            next_name = page.get("NextRecordName", None)
            next_type = page.get("NextRecordType", None)
class Provider(BaseProvider):
    """Provide AWS Route 53 implementation of Lexicon Provider interface."""

    def __init__(self, config):
        """Initialize AWS Route 53 DNS provider."""
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.private_zone = self._get_provider_option("private_zone")
        # instantiate the client
        self.r53_client = boto3.client(
            "route53",
            aws_access_key_id=self._get_provider_option("auth_access_key")
            or self._get_provider_option("auth_username"),
            aws_secret_access_key=self._get_provider_option("auth_access_secret")
            or self._get_provider_option("auth_token"),
        )

    def filter_zone(self, data):
        """Check if a zone matches the configured domain and privacy kind."""
        # When --private-zone is given, only accept zones whose PrivateZone
        # flag matches the requested value.
        if self.private_zone is not None:
            if data["Config"]["PrivateZone"] != self.str2bool(self.private_zone):
                return False
        # Route 53 zone names carry a trailing dot.
        if data["Name"] != f"{self.domain}.":
            return False
        return True

    @staticmethod
    def str2bool(input_string):
        """Convert a string to boolean"""
        return input_string.lower() in ("true", "yes")

    def _authenticate(self):
        """Determine the hosted zone id for the domain.

        ``list_hosted_zones_by_name`` returns at most 100 zones per call,
        so accounts with more than 100 hosted zones must follow the
        NextDNSName/NextHostedZoneId pagination markers until the full
        listing has been collected; otherwise the zone may never be found.
        """
        try:
            hosted_zones = []
            next_dns_name = None
            next_hz_id = None
            is_truncated = True
            while is_truncated:
                if next_dns_name is None:
                    page = self.r53_client.list_hosted_zones_by_name()
                else:
                    page = self.r53_client.list_hosted_zones_by_name(
                        DNSName=next_dns_name, HostedZoneId=next_hz_id
                    )
                hosted_zones.extend(page.get("HostedZones", []))
                is_truncated = page.get("IsTruncated", False)
                if is_truncated:
                    next_dns_name = page.get("NextDNSName")
                    next_hz_id = page.get("NextHostedZoneId")
            hosted_zone = next(hz for hz in hosted_zones if self.filter_zone(hz))
            self.domain_id = hosted_zone["Id"]
        except StopIteration:
            raise Exception("No domain found")

    def _change_record_sets(self, action, rtype, name, content):
        """Submit one CREATE/UPSERT/DELETE change batch for a record set.

        Returns True on success (a duplicate-record error counts as
        success, since the desired state already exists), False otherwise.
        """
        ttl = self._get_lexicon_option("ttl")
        resource_records = []
        if isinstance(content, list):
            for i in content:
                # TXT/SPF values must be wrapped in double quotes.
                value = f'"{i}"' if rtype in ["TXT", "SPF"] else i
                resource_records.append({"Value": value})
        else:
            value = f'"{content}"' if rtype in ["TXT", "SPF"] else content
            resource_records.append({"Value": value})
        try:
            self.r53_client.change_resource_record_sets(
                HostedZoneId=self.domain_id,
                ChangeBatch={
                    "Comment": f"{action} using lexicon Route 53 provider",
                    "Changes": [
                        {
                            "Action": action,
                            "ResourceRecordSet": {
                                "Name": self._fqdn_name(name),
                                "Type": rtype,
                                "TTL": ttl if ttl is not None else 300,
                                "ResourceRecords": resource_records,
                            },
                        }
                    ],
                },
            )
            return True
        except botocore.exceptions.ClientError as error:
            if "Duplicate Resource Record" in error.response["Error"]["Message"]:
                # Duplicate resource, that have been a noop. This is expected.
                return True
            LOGGER.error(str(error), exc_info=True)
            return False

    def _create_record(self, rtype, name, content):
        """Create a record in the hosted zone.

        If a record set with the same type/name already exists, the new
        value is appended to it via UPSERT instead of creating a duplicate.
        """
        existing_records = self._list_record_sets(rtype, name)
        if existing_records:
            existing_record = existing_records[0]
            if isinstance(existing_records[0]["content"], list):
                return self._change_record_sets(
                    "UPSERT",
                    existing_record["type"],
                    existing_record["name"],
                    existing_record["content"] + [content],
                )
            return self._change_record_sets(
                "UPSERT", rtype, name, [existing_record["content"]] + [content]
            )
        return self._change_record_sets("CREATE", rtype, name, content)

    def _update_record(self, identifier=None, rtype=None, name=None, content=None):
        """Update a record from the hosted zone.

        Raises:
            ValueError: if no record matches the identifier or the
                type/name pair.
        """
        if identifier:
            records = [
                record
                for record in self._list_records()
                if identifier == _identifier(record)
            ]
            if not records:
                raise ValueError(f"No record found for identifier {identifier}")
            record = records[0]
            rtype = record["type"]
            name = record["name"]
        existing_records = self._list_record_sets(rtype, name)
        if not existing_records:
            raise ValueError("No matching record to update was found.")
        for existing_record in existing_records:
            if isinstance(existing_record["content"], list):
                # Multiple values in record.
                LOGGER.warning(
                    "Warning, multiple records found for given parameters, "
                    "only first entry will be updated: %s",
                    existing_record,
                )
                new_content = existing_record["content"].copy()
                new_content[0] = content
            else:
                new_content = content
            self._change_record_sets(
                "UPSERT", existing_record["type"], existing_record["name"], new_content
            )
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete a record from the hosted zone.

        For multi-value record sets only the matching value is removed
        (via UPSERT); otherwise the whole record set is deleted.

        Raises:
            ValueError: if nothing matches the identifier or filters.
        """
        if identifier:
            matching_records = [
                record
                for record in self._list_records()
                if identifier == _identifier(record)
            ]
            if not matching_records:
                raise ValueError(f"No record found for identifier {identifier}")
            rtype = matching_records[0]["type"]
            name = matching_records[0]["name"]
            content = matching_records[0]["content"]
        existing_records = self._list_record_sets(rtype, name, content)
        if not existing_records:
            raise ValueError("No record found for the provided type, name and content")
        for existing_record in existing_records:
            if isinstance(existing_record["content"], list) and content is not None:
                # multiple values in record, just remove one value and only if it actually exist
                if content in existing_record["content"]:
                    existing_record["content"].remove(content)
                    self._change_record_sets(
                        "UPSERT",
                        existing_record["type"],
                        existing_record["name"],
                        existing_record["content"],
                    )
            else:
                # if only one record exist, or if content is not specified, remove whole record
                self._change_record_sets(
                    "DELETE",
                    existing_record["type"],
                    existing_record["name"],
                    existing_record["content"],
                )
        return True

    def _list_records(self, rtype=None, name=None, content=None):
        """List all records for the hosted zone, one entry per value."""
        records = self._list_record_sets(rtype, name, content)
        flatten_records = []
        for record in records:
            if isinstance(record["content"], list):
                # Expand multi-value record sets into one flat entry per value.
                for one_content in record["content"]:
                    flatten_record = record.copy()
                    flatten_record["content"] = one_content
                    flatten_record["id"] = _identifier(flatten_record)
                    flatten_records.append(flatten_record)
            else:
                record["id"] = _identifier(record)
                flatten_records.append(record)
        LOGGER.debug("list_records: %s", records)
        return flatten_records

    def _list_record_sets(self, rtype=None, name=None, content=None):
        """Collect record sets matching the optional type/name/content filters."""
        records = []
        paginator = RecordSetPaginator(self.r53_client, self.domain_id)
        for record in paginator.all_record_sets():
            record_content = []
            if rtype is not None and record["Type"] != rtype:
                continue
            if name is not None and record["Name"] != self._fqdn_name(name):
                continue
            if record.get("AliasTarget", None) is not None:
                record_content = [record["AliasTarget"].get("DNSName", None)]
            if record.get("ResourceRecords", None) is not None:
                record_content = [
                    _format_content(record["Type"], value["Value"])
                    for value in record["ResourceRecords"]
                ]
            if content is not None and content not in record_content:
                continue
            LOGGER.debug("record: %s", record)
            records.append(
                {
                    "type": record["Type"],
                    "name": self._full_name(record["Name"]),
                    "ttl": record.get("TTL", None),
                    "content": record_content[0]
                    if len(record_content) == 1
                    else record_content,
                }
            )
        return records

    def _request(self, action="GET", url="/", data=None, query_params=None):
        # Helper _request is not used in Route53 provider
        pass
def _format_content(rtype, content):
    """Strip the surrounding quote characters Route 53 stores on TXT/SPF values."""
    if rtype in ["TXT", "SPF"]:
        return content[1:-1]
    return content
def _identifier(record):
    """Derive a short, stable id from a record's type, name and data fields."""
    digest = hashlib.sha256()
    for field in ("type", "name", "data"):
        digest.update((field + "=" + record.get(field, "") + ",").encode("utf-8"))
    return digest.hexdigest()[0:7]
<|code_end|>
| lexicon/providers/route53.py
<|code_start|>"""Provide support to Lexicon for AWS Route 53 DNS changes."""
from __future__ import absolute_import
import hashlib
import logging
import re
from lexicon.providers.base import Provider as BaseProvider
try:
import boto3 # optional dep
import botocore # optional dep
except ImportError:
pass
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = [re.compile(r"^awsdns-\d+\.\w+$")]
def provider_parser(subparser):
    """Register the AWS Route 53 command-line options on *subparser*."""
    option_specs = [
        ("--auth-access-key", "specify ACCESS_KEY for authentication"),
        ("--auth-access-secret", "specify ACCESS_SECRET for authentication"),
        (
            "--private-zone",
            (
                "indicates what kind of hosted zone to use. If true, use "
                "only private zones. If false, use only public zones"
            ),
        ),
        # TODO: these are only required for testing, we should figure out
        # a way to remove them & update the integration tests
        # to dynamically populate the auth credentials that are required.
        ("--auth-username", "alternative way to specify the ACCESS_KEY for authentication"),
        ("--auth-token", "alternative way to specify the ACCESS_SECRET for authentication"),
    ]
    for flag, help_text in option_specs:
        subparser.add_argument(flag, help=help_text)
class RecordSetPaginator(object):
    """Iterate over every record set of a hosted zone, page by page."""

    def __init__(self, r53_client, hosted_zone_id, max_items=None):
        """Store the client and query parameters for later page fetches."""
        self.r53_client = r53_client
        self.hosted_zone_id = hosted_zone_id
        self.max_items = max_items

    def get_record_sets(self, **kwargs):
        """Fetch a single page of record sets from the Route 53 API."""
        return self.r53_client.list_resource_record_sets(**kwargs)

    def get_base_kwargs(self):
        """Build the keyword arguments shared by every page request."""
        base = {"HostedZoneId": self.hosted_zone_id}
        if self.max_items is not None:
            base.update({"MaxItems": str(self.max_items)})
        return base

    def all_record_sets(self):
        """Yield record sets, requesting the next page while one exists."""
        kwargs = self.get_base_kwargs()
        next_name = None
        next_type = None
        while True:
            if next_name is not None:
                kwargs.update(
                    {
                        "StartRecordName": next_name,
                        "StartRecordType": next_type,
                    }
                )
            page = self.get_record_sets(**kwargs)
            for record_set in page.get("ResourceRecordSets", []):
                yield record_set
            if not page.get("IsTruncated", False):
                return
            next_name = page.get("NextRecordName", None)
            next_type = page.get("NextRecordType", None)
class Provider(BaseProvider):
    """Provide AWS Route 53 implementation of Lexicon Provider interface."""

    def __init__(self, config):
        """Initialize AWS Route 53 DNS provider."""
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.private_zone = self._get_provider_option("private_zone")
        # instantiate the client
        self.r53_client = boto3.client(
            "route53",
            aws_access_key_id=self._get_provider_option("auth_access_key")
            or self._get_provider_option("auth_username"),
            aws_secret_access_key=self._get_provider_option("auth_access_secret")
            or self._get_provider_option("auth_token"),
        )

    def filter_zone(self, data):
        """Check if a zone is private"""
        # When --private-zone is given, only accept zones whose PrivateZone
        # flag matches the requested value.
        if self.private_zone is not None:
            if data["Config"]["PrivateZone"] != self.str2bool(self.private_zone):
                return False
        # Route 53 zone names carry a trailing dot.
        if data["Name"] != f"{self.domain}.":
            return False
        return True

    @staticmethod
    def str2bool(input_string):
        """Convert a string to boolean"""
        return input_string.lower() in ("true", "yes")

    def _authenticate(self):
        """Determine the hosted zone id for the domain."""
        try:
            is_truncated = True
            next_dns_name = None
            next_hz_id = None
            hosted_zones = []
            # list_hosted_zones_by_name returns at most 100 zones per call;
            # follow the NextDNSName/NextHostedZoneId markers so accounts
            # with more than 100 hosted zones are fully listed.
            while is_truncated:
                if not next_dns_name:
                    new_zones = self.r53_client.list_hosted_zones_by_name()
                else:
                    new_zones = self.r53_client.list_hosted_zones_by_name(
                        DNSName=next_dns_name, HostedZoneId=next_hz_id
                    )
                hosted_zones.extend(new_zones.get("HostedZones"))
                is_truncated = new_zones.get("IsTruncated")
                if is_truncated:
                    next_dns_name = new_zones.get("NextDNSName")
                    next_hz_id = new_zones.get("NextHostedZoneId")
            hosted_zone = next(hz for hz in hosted_zones if self.filter_zone(hz))
            self.domain_id = hosted_zone.get("Id")
        except StopIteration:
            raise Exception("No domain found")

    def _change_record_sets(self, action, rtype, name, content):
        # Submit one CREATE/UPSERT/DELETE change batch; returns True on
        # success. A "Duplicate Resource Record" error is treated as success
        # because the desired state already exists.
        ttl = self._get_lexicon_option("ttl")
        resource_records = []
        if isinstance(content, list):
            for i in content:
                # TXT/SPF values must be wrapped in double quotes.
                value = f'"{i}"' if rtype in ["TXT", "SPF"] else i
                resource_records.append({"Value": value})
        else:
            value = f'"{content}"' if rtype in ["TXT", "SPF"] else content
            resource_records.append({"Value": value})
        try:
            self.r53_client.change_resource_record_sets(
                HostedZoneId=self.domain_id,
                ChangeBatch={
                    "Comment": f"{action} using lexicon Route 53 provider",
                    "Changes": [
                        {
                            "Action": action,
                            "ResourceRecordSet": {
                                "Name": self._fqdn_name(name),
                                "Type": rtype,
                                "TTL": ttl if ttl is not None else 300,
                                "ResourceRecords": resource_records,
                            },
                        }
                    ],
                },
            )
            return True
        except botocore.exceptions.ClientError as error:
            if "Duplicate Resource Record" in error.response["Error"]["Message"]:
                # Duplicate resource, that have been a noop. This is expected.
                return True
            LOGGER.error(str(error), exc_info=True)
            return False

    def _create_record(self, rtype, name, content):
        """Create a record in the hosted zone."""
        # If a record set with the same type/name exists, append the value
        # via UPSERT instead of creating a duplicate record set.
        existing_records = self._list_record_sets(rtype, name)
        if existing_records:
            existing_record = existing_records[0]
            if isinstance(existing_records[0]["content"], list):
                return self._change_record_sets(
                    "UPSERT",
                    existing_record["type"],
                    existing_record["name"],
                    existing_record["content"] + [content],
                )
            return self._change_record_sets(
                "UPSERT", rtype, name, [existing_record["content"]] + [content]
            )
        return self._change_record_sets("CREATE", rtype, name, content)

    def _update_record(self, identifier=None, rtype=None, name=None, content=None):
        """Update a record from the hosted zone."""
        if identifier:
            # Resolve the identifier back to a concrete type/name pair.
            records = [
                record
                for record in self._list_records()
                if identifier == _identifier(record)
            ]
            if not records:
                raise ValueError(f"No record found for identifier {identifier}")
            record = records[0]
            rtype = record["type"]
            name = record["name"]
        existing_records = self._list_record_sets(rtype, name)
        if not existing_records:
            raise ValueError("No matching record to update was found.")
        for existing_record in existing_records:
            if isinstance(existing_record["content"], list):
                # Multiple values in record.
                LOGGER.warning(
                    "Warning, multiple records found for given parameters, "
                    "only first entry will be updated: %s",
                    existing_record,
                )
                new_content = existing_record["content"].copy()
                new_content[0] = content
            else:
                new_content = content
            self._change_record_sets(
                "UPSERT", existing_record["type"], existing_record["name"], new_content
            )
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete a record from the hosted zone."""
        if identifier:
            matching_records = [
                record
                for record in self._list_records()
                if identifier == _identifier(record)
            ]
            if not matching_records:
                raise ValueError(f"No record found for identifier {identifier}")
            rtype = matching_records[0]["type"]
            name = matching_records[0]["name"]
            content = matching_records[0]["content"]
        existing_records = self._list_record_sets(rtype, name, content)
        if not existing_records:
            raise ValueError("No record found for the provided type, name and content")
        for existing_record in existing_records:
            if isinstance(existing_record["content"], list) and content is not None:
                # multiple values in record, just remove one value and only if it actually exist
                if content in existing_record["content"]:
                    existing_record["content"].remove(content)
                    self._change_record_sets(
                        "UPSERT",
                        existing_record["type"],
                        existing_record["name"],
                        existing_record["content"],
                    )
            else:
                # if only one record exist, or if content is not specified, remove whole record
                self._change_record_sets(
                    "DELETE",
                    existing_record["type"],
                    existing_record["name"],
                    existing_record["content"],
                )
        return True

    def _list_records(self, rtype=None, name=None, content=None):
        """List all records for the hosted zone."""
        records = self._list_record_sets(rtype, name, content)
        flatten_records = []
        for record in records:
            if isinstance(record["content"], list):
                # Expand multi-value record sets into one flat entry per value.
                for one_content in record["content"]:
                    flatten_record = record.copy()
                    flatten_record["content"] = one_content
                    flatten_record["id"] = _identifier(flatten_record)
                    flatten_records.append(flatten_record)
            else:
                record["id"] = _identifier(record)
                flatten_records.append(record)
        LOGGER.debug("list_records: %s", records)
        return flatten_records

    def _list_record_sets(self, rtype=None, name=None, content=None):
        # Collect record sets matching the optional type/name/content filters.
        records = []
        paginator = RecordSetPaginator(self.r53_client, self.domain_id)
        for record in paginator.all_record_sets():
            record_content = []
            if rtype is not None and record["Type"] != rtype:
                continue
            if name is not None and record["Name"] != self._fqdn_name(name):
                continue
            if record.get("AliasTarget", None) is not None:
                record_content = [record["AliasTarget"].get("DNSName", None)]
            if record.get("ResourceRecords", None) is not None:
                record_content = [
                    _format_content(record["Type"], value["Value"])
                    for value in record["ResourceRecords"]
                ]
            if content is not None and content not in record_content:
                continue
            LOGGER.debug("record: %s", record)
            records.append(
                {
                    "type": record["Type"],
                    "name": self._full_name(record["Name"]),
                    "ttl": record.get("TTL", None),
                    "content": record_content[0]
                    if len(record_content) == 1
                    else record_content,
                }
            )
        return records

    def _request(self, action="GET", url="/", data=None, query_params=None):
        # Helper _request is not used in Route53 provider
        pass
def _format_content(rtype, content):
    """Strip the surrounding quote characters Route 53 stores on TXT/SPF values."""
    if rtype in ["TXT", "SPF"]:
        return content[1:-1]
    return content
def _identifier(record):
    """Derive a short, stable id from a record's type, name and data fields."""
    digest = hashlib.sha256()
    for field in ("type", "name", "data"):
        digest.update((field + "=" + record.get(field, "") + ",").encode("utf-8"))
    return digest.hexdigest()[0:7]
<|code_end|>
|
Plesk provider assumes `site_name` equals `domain` (which is not always true)
The plesk provider assumes that the `site_name` is always the same as the `domain`: https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/plesk.py#L52
That creates problems if one wants to create a record for a subdomain (e.g. `test.example.com`) for which there is no matching plesk site. It could be the case that the record for `test.example.com` (and even all records `*.test.example.com`) are part of the site `example.com`.
Example (source for the certbot plugin [here](https://gitlab.com/spike77453/certbot-dns-plesk/)):
```
# certbot certonly --authenticator dns-plesk --dns-plesk-credentials plesk.ini -d test.example.com
Saving debug log to /var/log/letsencrypt/letsencrypt.log
Plugins selected: Authenticator dns-plesk, Installer None
Simulating a certificate request for test.example.com
Performing the following challenges:
dns-01 challenge for test.example.com
Cleaning up challenges
Unexpected error determining zone identifier for test.example.com: API returned error: 1013 (Site does not exist)
```
One could fix this by doing something similar to the `Client` class and strip the subdomain part https://github.com/AnalogJ/lexicon/blob/master/lexicon/client.py#L36. The `Provider` constructor could do something like:
```
try:
domain_extractor = tldextract.TLDExtract(
cache_dir=_get_tldextract_cache_path(), include_psl_private_domains=True
)
except TypeError:
domain_extractor = tldextract.TLDExtract(
cache_file=_get_tldextract_cache_path(), include_psl_private_domains=True # type: ignore
)
domain_parts = domain_extractor(self.domain)
#self.site_name = self.domain
self.site_name = f"{domain_parts.domain}.{domain_parts.suffix}"
assert self.site_name is not None
```
And then instead of stripping the domain from `host`, one would strip the `site_name` here: https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/plesk.py#L179
That, however, no longer works if there actually is a dedicated site for that particular subdomain.
How about adding an optional `site-id` provider option so the user can specify the site manually in the configuration file?
| lexicon/providers/plesk.py
<|code_start|>"""
Lexicon Plesk Provider
Author: Jens Reimann, 2018
API Docs: https://docs.plesk.com/en-US/onyx/api-rpc
"""
import logging
from collections import defaultdict
from typing import Dict, List, Optional
from xml.etree import cElementTree
from xml.etree.ElementTree import Element
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
PLEX_URL_SUFFIX = "/enterprise/control/agent.php"
NAMESERVER_DOMAINS: List[str] = []
def provider_parser(subparser):
    """Register the Plesk command-line options on *subparser*."""
    for flag, help_text in (
        ("--auth-username", "specify username for authentication"),
        ("--auth-password", "specify password for authentication"),
        ("--plesk-server", "specify URL to the Plesk Web UI, including the port"),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
    """Provider class for Plesk"""

    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Normalize the configured server URL into the XML-RPC agent endpoint.
        self.api_endpoint = self._get_provider_option("plesk_server")
        if self.api_endpoint.endswith("/"):
            self.api_endpoint = self.api_endpoint[:-1]
        if not self.api_endpoint.endswith(PLEX_URL_SUFFIX):
            self.api_endpoint += PLEX_URL_SUFFIX
        # NOTE(review): assumes the Plesk site name equals the requested
        # domain. For a subdomain whose records live in a parent site this
        # lookup fails with "Site does not exist" — confirm against callers.
        self.site_name = self.domain
        assert self.site_name is not None
        self.domain_id = None
        self.username = self._get_provider_option("auth_username")
        assert self.username is not None
        self.password = self._get_provider_option("auth_password")
        assert self.password is not None

    def __simple_request(self, rtype, operation, req):
        # Wrap a single {rtype: {operation: req}} packet and raise on any
        # error status reported in the response.
        response = self.__plesk_request({rtype: {operation: req}})[rtype][operation]
        result = response["result"]
        if isinstance(result, list):
            for record in result:
                if record["status"] == "error":
                    raise Exception(
                        f"API returned at least one error: {record['errtext']}"
                    )
        elif response["result"]["status"] == "error":
            errcode = response["result"]["errcode"]
            errtext = response["result"]["errtext"]
            raise Exception(f"API returned error: {errcode} ({errtext})")
        return response

    def __plesk_request(self, request):
        # Serialize the request dict to the Plesk XML-RPC format, POST it,
        # and parse the XML response back into nested dicts.
        headers = {
            "Content-type": "text/xml",
            "HTTP_PRETTY_PRINT": "TRUE",
            "HTTP_AUTH_LOGIN": self.username,
            "HTTP_AUTH_PASSWD": self.password,
        }
        xml = f"""\
<?xml version="1.0" encoding="utf-8"?>
{cElementTree.tostring(_dict_to_etree({"packet": request}), encoding="unicode")}\
"""
        LOGGER.debug("Request: %s", xml)
        response = requests.post(
            self.api_endpoint,
            headers=headers,
            data=xml,
            auth=(self.username, self.password),
        )
        data = response.text
        LOGGER.debug("Response: %s", data)
        result = _etree_to_dict(cElementTree.XML(data))
        return result["packet"]

    def __find_site(self):
        # Resolve the configured site name to its Plesk site id.
        return self.__simple_request(
            "site", "get", {"filter": {"name": self.site_name, "dataset": {}}}
        )["result"]["id"]

    def _authenticate(self):
        self.domain_id = self.__find_site()
        if self.domain_id is None:
            raise Exception("Domain not found")

    def _create_record(self, rtype, name, content):
        return self.__create_entry(rtype, name, content, None)

    def _list_records(self, rtype=None, name=None, content=None):
        entries = self.__find_dns_entries(rtype, name, content)
        LOGGER.debug("list_records: %s", entries)
        return entries

    def _update_record(self, identifier, rtype=None, name=None, content=None):
        # Updates are implemented as delete-then-recreate of the entry.
        if identifier is None:
            entries = self.__find_dns_entries(rtype, name, None)
            LOGGER.debug("Entries found: %s", entries)
            if not entries:
                raise Exception("No entry found for updating")
            identifier = entries[0]["id"]
            entry = self.__get_dns_entry(identifier)
            ids = []
            for an_entry in entries:
                ids.append(an_entry["id"])
            self.__delete_dns_records_by_id(ids)
        else:
            entry = self.__get_dns_entry(identifier)
            self.__delete_dns_records_by_id([identifier])
        assert entry is not None
        LOGGER.debug("Updating: %s", entry)
        if rtype:
            entry["type"] = rtype
        if name:
            entry["host"] = name
        if content:
            entry["value"] = content
        return self.__create_entry(
            entry["type"], entry["host"], entry["value"], entry["opt"]
        )

    def __create_entry(self, rtype, host, value, opt):
        # Adding an entry is idempotent: an already-existing match is a no-op.
        entries = self.__find_dns_entries(rtype, self._fqdn_name(host), value)
        if entries:
            return True  # already exists
        self.__simple_request(
            "dns",
            "add_rec",
            {
                "site-id": self.domain_id,
                "type": rtype,
                "host": self._relative_name(host),
                "value": value,
                "opt": opt,
            },
        )
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        if identifier:
            self.__delete_dns_records_by_id([identifier])
            return True
        entries = self.__find_dns_entries(rtype, self._fqdn_name(name), content)
        ids = []
        for entry in entries:
            ids.append(entry["id"])
        self.__delete_dns_records_by_id(ids)
        # True only when at least one matching entry was deleted.
        return bool(ids)

    def __get_dns_entry(self, identifier):
        return self.__simple_request("dns", "get_rec", {"filter": {"id": identifier}})[
            "result"
        ]["data"]

    def __find_dns_entries(self, rtype=None, host=None, value=None):
        # Fetch every DNS record of the site, then filter client-side by
        # the optional type/host/value criteria.
        LOGGER.debug("Searching for: %s, %s, %s", rtype, host, value)
        if value and rtype and rtype in ["CNAME"]:
            LOGGER.debug("CNAME transformation")
            # Plesk stores CNAME targets with a trailing dot.
            value = value.rstrip(".") + "."
        if host:
            host = self._fqdn_name(host)
        result = self.__simple_request(
            "dns", "get_rec", {"filter": {"site-id": self.domain_id}}
        )
        entries = []
        for record in result["result"]:
            LOGGER.debug("Record: %s", record)
            if (rtype is not None) and (record["data"]["type"] != rtype):
                LOGGER.debug(
                    "\tType doesn't match - expected: '%s', found: '%s'",
                    rtype,
                    record["data"]["type"],
                )
                continue
            if (host is not None) and (record["data"]["host"] != host):
                LOGGER.debug(
                    "\tHost doesn't match - expected: '%s', found: '%s'",
                    host,
                    record["data"]["host"],
                )
                continue
            if (value is not None) and (record["data"]["value"] != value):
                LOGGER.debug(
                    "\tValue doesn't match - expected: '%s', found: '%s'",
                    value,
                    record["data"]["value"],
                )
                continue
            entry = {
                "id": record["id"],
                "type": record["data"]["type"],
                "name": self._full_name(record["data"]["host"]),
                "ttl": None,
                "options": {},
            }
            if record["data"]["type"] in ["CNAME"]:
                # Present CNAME targets without the trailing dot.
                entry["content"] = record["data"]["value"].rstrip(".")
            else:
                entry["content"] = record["data"]["value"]
            if record["data"]["type"] == "MX":
                entry["options"]["mx"] = {"priority": int(record["data"]["opt"])}
            entries.append(entry)
        return entries

    def __delete_dns_records_by_id(self, ids):
        if not ids:
            return
        # Batch all deletions into a single XML-RPC packet.
        req = []
        for i in ids:
            req.append({"del_rec": {"filter": {"id": i}}})
        self.__plesk_request({"dns": req})

    def _request(self, action="GET", url="/", data=None, query_params=None):
        # Helper _request is not used for Plesk provider
        pass
def _etree_to_dict(t: Element) -> Optional[Dict]:
    """Recursively convert an XML element into nested dicts.

    Attributes become "@name" keys, mixed text becomes "#text", repeated
    child tags collapse into a list, and a leaf element maps its tag to
    its stripped text (or None when empty).
    """
    result: Optional[Dict] = {t.tag: {} if t.attrib else None}
    children = list(t)
    if children:
        grouped = defaultdict(list)
        for child in children:
            child_dict = _etree_to_dict(child)
            if child_dict:
                for key, value in child_dict.items():
                    grouped[key].append(value)
        result = {
            t.tag: {
                key: values[0] if len(values) == 1 else values
                for key, values in grouped.items()
            }
        }
    if t.attrib and result:
        for key, value in t.attrib.items():
            result[t.tag]["@" + key] = value
    if t.text and result:
        stripped = t.text.strip()
        if children or t.attrib:
            if stripped:
                result[t.tag]["#text"] = stripped
        else:
            result[t.tag] = stripped
    return result
def _dict_to_etree(d: Dict) -> Element:
    """Convert a nested dict (as produced by ``_etree_to_dict``) back into
    an XML element tree.

    Keys starting with "@" become attributes, the "#text" key becomes the
    element text, list values produce repeated sibling elements, and plain
    strings become text content. The top-level dict must hold exactly one
    root tag.

    Raises:
        TypeError: if a value is neither falsy, str, dict nor list.
    """
    # xml.etree.cElementTree was removed in Python 3.9; the plain
    # ElementTree module has been the canonical implementation since 3.3.
    from xml.etree import ElementTree

    def _to_etree(d1, root):
        if not d1:
            pass
        elif isinstance(d1, str):
            root.text = d1
        elif isinstance(d1, dict):
            for k, v in d1.items():
                assert isinstance(k, str)
                if k.startswith("#"):
                    assert k == "#text" and isinstance(v, str)
                    root.text = v
                elif k.startswith("@"):
                    assert isinstance(v, str)
                    root.set(k[1:], v)
                elif isinstance(v, list):
                    for e in v:
                        _to_etree(e, ElementTree.SubElement(root, k))
                else:
                    _to_etree(v, ElementTree.SubElement(root, k))
        else:
            raise TypeError("invalid type: " + str(type(d1)))

    assert isinstance(d, dict) and len(d) == 1
    tag, body = next(iter(d.items()))
    node = ElementTree.Element(tag)
    _to_etree(body, node)
    return node
<|code_end|>
| lexicon/providers/plesk.py
<|code_start|>"""
Lexicon Plesk Provider
Author: Jens Reimann, 2018
API Docs: https://docs.plesk.com/en-US/onyx/api-rpc
"""
import logging
from collections import defaultdict
from typing import Dict, List, Optional
from xml.etree import cElementTree
from xml.etree.ElementTree import Element
import requests
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
PLEX_URL_SUFFIX = "/enterprise/control/agent.php"
NAMESERVER_DOMAINS: List[str] = []
def provider_parser(subparser):
    """Register the Plesk command-line options on *subparser*."""
    for flag, help_text in (
        ("--auth-username", "specify username for authentication"),
        ("--auth-password", "specify password for authentication"),
        ("--plesk-server", "specify URL to the Plesk Web UI, including the port"),
    ):
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
"""Provider class for Plesk"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.api_endpoint = self._get_provider_option("plesk_server")
if self.api_endpoint.endswith("/"):
self.api_endpoint = self.api_endpoint[:-1]
if not self.api_endpoint.endswith(PLEX_URL_SUFFIX):
self.api_endpoint += PLEX_URL_SUFFIX
self.domain_id = None
self.username = self._get_provider_option("auth_username")
assert self.username is not None
self.password = self._get_provider_option("auth_password")
assert self.password is not None
def __simple_request(self, rtype, operation, req):
response = self.__plesk_request({rtype: {operation: req}})[rtype][operation]
result = response["result"]
if isinstance(result, list):
for record in result:
if record["status"] == "error":
raise Exception(
f"API returned at least one error: {record['errtext']}"
)
elif response["result"]["status"] == "error":
errcode = response["result"]["errcode"]
errtext = response["result"]["errtext"]
raise Exception(f"API returned error: {errcode} ({errtext})")
return response
def __plesk_request(self, request):
headers = {
"Content-type": "text/xml",
"HTTP_PRETTY_PRINT": "TRUE",
"HTTP_AUTH_LOGIN": self.username,
"HTTP_AUTH_PASSWD": self.password,
}
xml = f"""\
<?xml version="1.0" encoding="utf-8"?>
{cElementTree.tostring(_dict_to_etree({"packet": request}), encoding="unicode")}\
"""
LOGGER.debug("Request: %s", xml)
response = requests.post(
self.api_endpoint,
headers=headers,
data=xml,
auth=(self.username, self.password),
)
data = response.text
LOGGER.debug("Response: %s", data)
result = _etree_to_dict(cElementTree.XML(data))
return result["packet"]
def __find_site(self):
return self.__simple_request(
"site", "get", {"filter": {"name": self.domain, "dataset": {}}}
)["result"]["id"]
def _authenticate(self):
self.domain_id = self.__find_site()
if self.domain_id is None:
raise Exception("Domain not found")
def _create_record(self, rtype, name, content):
return self.__create_entry(rtype, name, content, None)
def _list_records(self, rtype=None, name=None, content=None):
entries = self.__find_dns_entries(rtype, name, content)
LOGGER.debug("list_records: %s", entries)
return entries
def _update_record(self, identifier, rtype=None, name=None, content=None):
if identifier is None:
entries = self.__find_dns_entries(rtype, name, None)
LOGGER.debug("Entries found: %s", entries)
if not entries:
raise Exception("No entry found for updating")
identifier = entries[0]["id"]
entry = self.__get_dns_entry(identifier)
ids = []
for an_entry in entries:
ids.append(an_entry["id"])
self.__delete_dns_records_by_id(ids)
else:
entry = self.__get_dns_entry(identifier)
self.__delete_dns_records_by_id([identifier])
assert entry is not None
LOGGER.debug("Updating: %s", entry)
if rtype:
entry["type"] = rtype
if name:
entry["host"] = name
if content:
entry["value"] = content
return self.__create_entry(
entry["type"], entry["host"], entry["value"], entry["opt"]
)
def __create_entry(self, rtype, host, value, opt):
entries = self.__find_dns_entries(rtype, self._fqdn_name(host), value)
if entries:
return True # already exists
self.__simple_request(
"dns",
"add_rec",
{
"site-id": self.domain_id,
"type": rtype,
"host": self._relative_name(host),
"value": value,
"opt": opt,
},
)
return True
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
if identifier:
self.__delete_dns_records_by_id([identifier])
return True
entries = self.__find_dns_entries(rtype, self._fqdn_name(name), content)
ids = []
for entry in entries:
ids.append(entry["id"])
self.__delete_dns_records_by_id(ids)
return bool(ids)
def __get_dns_entry(self, identifier):
return self.__simple_request("dns", "get_rec", {"filter": {"id": identifier}})[
"result"
]["data"]
def __find_dns_entries(self, rtype=None, host=None, value=None):
LOGGER.debug("Searching for: %s, %s, %s", rtype, host, value)
if value and rtype and rtype in ["CNAME"]:
LOGGER.debug("CNAME transformation")
value = value.rstrip(".") + "."
if host:
host = self._fqdn_name(host)
result = self.__simple_request(
"dns", "get_rec", {"filter": {"site-id": self.domain_id}}
)
entries = []
for record in result["result"]:
LOGGER.debug("Record: %s", record)
if (rtype is not None) and (record["data"]["type"] != rtype):
LOGGER.debug(
"\tType doesn't match - expected: '%s', found: '%s'",
rtype,
record["data"]["type"],
)
continue
if (host is not None) and (record["data"]["host"] != host):
LOGGER.debug(
"\tHost doesn't match - expected: '%s', found: '%s'",
host,
record["data"]["host"],
)
continue
if (value is not None) and (record["data"]["value"] != value):
LOGGER.debug(
"\tValue doesn't match - expected: '%s', found: '%s'",
value,
record["data"]["value"],
)
continue
entry = {
"id": record["id"],
"type": record["data"]["type"],
"name": self._full_name(record["data"]["host"]),
"ttl": None,
"options": {},
}
if record["data"]["type"] in ["CNAME"]:
entry["content"] = record["data"]["value"].rstrip(".")
else:
entry["content"] = record["data"]["value"]
if record["data"]["type"] == "MX":
entry["options"]["mx"] = {"priority": int(record["data"]["opt"])}
entries.append(entry)
return entries
def __delete_dns_records_by_id(self, ids):
if not ids:
return
req = []
for i in ids:
req.append({"del_rec": {"filter": {"id": i}}})
self.__plesk_request({"dns": req})
def _request(self, action="GET", url="/", data=None, query_params=None):
# Helper _request is not used for Plesk provider
pass
def _etree_to_dict(t: Element) -> Optional[Dict]:
d: Optional[Dict] = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(_etree_to_dict, children):
if dc:
for k, v in dc.items():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
if t.attrib and d:
d[t.tag].update(("@" + k, v) for k, v in t.attrib.items())
if t.text and d:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]["#text"] = text
else:
d[t.tag] = text
return d
def _dict_to_etree(d: Dict) -> Element:
def _to_etree(d1, root):
if not d1:
pass
elif isinstance(d1, str):
root.text = d1
elif isinstance(d1, dict):
for k, v in d1.items():
assert isinstance(k, str)
if k.startswith("#"):
assert k == "#text" and isinstance(v, str)
root.text = v
elif k.startswith("@"):
assert isinstance(v, str)
root.set(k[1:], v)
elif isinstance(v, list):
for e in v:
_to_etree(e, cElementTree.SubElement(root, k))
else:
_to_etree(v, cElementTree.SubElement(root, k))
else:
raise TypeError("invalid type: " + str(type(d1)))
assert isinstance(d, dict) and len(d) == 1
tag, body = next(iter(d.items()))
node = cElementTree.Element(tag)
_to_etree(body, node)
return node
<|code_end|>
|
Dreamhost integration will break November 2nd, 2021
It looks like Dreamhost is "retiring" some of it's API calls Nov 2nd, including `domain-list_domains` which Lexicon seems to use for validating the domain during initial auth:
https://help.dreamhost.com/hc/en-us/articles/217555767-Domain-API-commands
https://github.com/AnalogJ/lexicon/blob/db82a948febee04972bb648ac59471b292c7e394/lexicon/providers/dreamhost.py#L103
They have begin sending email to anyone using the API:
> The following API commands were run on your account in the last thirty days and will no longer function after November 2nd:
> - domain-list_domains
| lexicon/providers/dreamhost.py
<|code_start|>"""Module provider for Dreamhost"""
import base64
import json
import logging
import time
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["dreamhost.com"]
_DATA_NON_EXIST_ERROR_LIST = [
"no_record",
"no_type",
"no_value",
"no_such_record",
"no_such_type",
"no_such_value",
"no_such_zone",
]
_DATA_ALREADY_EXIST_ERROR_LIST = [
"record_already_exists_not_editable",
"record_already_exists_remove_first",
"CNAME_already_on_record",
]
class NonExistError(Exception):
"""NonExistError"""
class AlreadyExistError(Exception):
"""AlreadyExistError"""
def provider_parser(subparser):
"""Module provider for Dreamhost"""
subparser.add_argument("--auth-token", help="specify api key for authentication")
class Provider(BaseProvider):
"""Provider class for Dreamhost"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.api_endpoint = "https://api.dreamhost.com/"
# Dreamhost provides no identifier for a record.
# Furthermore, Dreamhost requires type, record, value to delete a record.
# The record defined in lexicon is {type, name, content, id}
# We use base64(json({'type', 'name', 'content'}))
# as the identifier of Dreamhost record.
@staticmethod
def _identifier(dreamhost_record):
id_struct = {
"type": dreamhost_record["type"],
"name": dreamhost_record["record"],
"content": dreamhost_record["value"],
}
return base64.urlsafe_b64encode(json.dumps(id_struct).encode("utf-8")).decode(
"utf-8"
)
# The information in identifier follows the record in lexicon.
# Provider._record_to_dreamhost_record transfers to dreamhost-based record.
@staticmethod
def _id_to_dreamhost_record(identifier):
record = json.loads(
base64.urlsafe_b64decode(identifier.encode("utf-8")).decode("utf-8")
)
dreamhost_record = Provider._record_to_dreamhost_record(record)
return dreamhost_record
# The information in identifier follows the record in lexicon.
# 'id' is added in the record.
@staticmethod
def _id_to_record(identifier):
record = json.loads(
base64.urlsafe_b64decode(identifier.encode("utf-8")).decode("utf-8")
)
record["id"] = identifier
return record
# Transferring lexicon-based record to Dreamhost-based record.
@staticmethod
def _record_to_dreamhost_record(record):
dreamhost_record = {
"type": record["type"],
"record": record["name"],
"value": record["content"],
}
return dreamhost_record
def _authenticate(self):
self.domain_id = None
payload = self._get("domain-list_domains")
data = payload.get("data", None)
if data is None:
raise AuthenticationError("Domain not found")
for domain in data:
if domain.get("domain", "") == self.domain:
self.domain_id = self.domain
if self.domain_id is None:
raise AuthenticationError("Domain not found")
def _create_record(self, rtype, name, content):
name = self._full_name(name)
try:
self._get(
"dns-add_record",
query_params={"record": name, "type": rtype, "value": content},
)
except AlreadyExistError:
pass
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def _list_records(self, rtype=None, name=None, content=None):
payload = self._get("dns-list_records")
resource_list = payload.get("data", None)
if not isinstance(resource_list, list):
raise Exception(f"unable to get records: {payload}")
resource_list = [
resource for resource in resource_list if resource["zone"] == self.domain
]
if rtype:
resource_list = [
resource for resource in resource_list if resource["type"] == rtype
]
if name:
name = self._full_name(name)
resource_list = [
resource for resource in resource_list if resource["record"] == name
]
if content:
resource_list = [
resource for resource in resource_list if resource["value"] == content
]
processed_records = []
for dreamhost_record in resource_list:
processed_records.append(
{
"id": Provider._identifier(dreamhost_record),
"type": dreamhost_record["type"],
"name": dreamhost_record["record"],
"content": dreamhost_record["value"],
}
)
return processed_records
# Create or update a record.
def _update_record(self, identifier, rtype=None, name=None, content=None):
if identifier:
try:
self._delete_record(identifier)
except NonExistError:
pass
return self._create_record(rtype=rtype, name=name, content=content)
# Delete existing records.
# If record does not exist, do nothing.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
to_deletes = []
if identifier:
record = Provider._id_to_record(identifier)
to_deletes.append(record)
else:
records = self._list_records(rtype=rtype, name=name, content=content)
to_deletes = records
# for-loop to delete deletes.
err = None
for each in to_deletes:
try:
dreamhost_record = Provider._record_to_dreamhost_record(each)
self._get("dns-remove_record", query_params=dreamhost_record)
except Exception as exception:
err = exception
# Sleeping for 1-second to avoid trigerring ddos protecting in case of looped requests
time.sleep(1)
if err is not None:
raise err
return True
# Helpers
def _request(self, action="GET", url="", data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
default_headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
query_params["key"] = self._get_provider_option("auth_token")
query_params["format"] = "json"
if "cmd" not in query_params:
query_params["cmd"] = url
response = requests.request(
action,
self.api_endpoint,
params=query_params,
data=json.dumps(data),
headers=default_headers,
)
# if the request fails for any reason, throw an error.
response.raise_for_status()
result = response.json()
if result.get("result", "") != "success":
err_msg = result.get("data", "")
if err_msg in _DATA_NON_EXIST_ERROR_LIST:
raise NonExistError(f"Dreamhost non-exist error: {result}")
if err_msg in _DATA_ALREADY_EXIST_ERROR_LIST:
raise AlreadyExistError(f"Dreamhost already-exist error: {result}")
raise Exception(f"Dreamhost api error: {result}")
return result
<|code_end|>
| lexicon/providers/dreamhost.py
<|code_start|>"""Module provider for Dreamhost"""
import base64
import json
import logging
import time
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["dreamhost.com"]
_DATA_NON_EXIST_ERROR_LIST = [
"no_record",
"no_type",
"no_value",
"no_such_record",
"no_such_type",
"no_such_value",
"no_such_zone",
]
_DATA_ALREADY_EXIST_ERROR_LIST = [
"record_already_exists_not_editable",
"record_already_exists_remove_first",
"CNAME_already_on_record",
]
class NonExistError(Exception):
"""NonExistError"""
class AlreadyExistError(Exception):
"""AlreadyExistError"""
def provider_parser(subparser):
"""Module provider for Dreamhost"""
subparser.add_argument("--auth-token", help="specify api key for authentication")
class Provider(BaseProvider):
"""Provider class for Dreamhost"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.api_endpoint = "https://api.dreamhost.com/"
# Dreamhost provides no identifier for a record.
# Furthermore, Dreamhost requires type, record, value to delete a record.
# The record defined in lexicon is {type, name, content, id}
# We use base64(json({'type', 'name', 'content'}))
# as the identifier of Dreamhost record.
@staticmethod
def _identifier(dreamhost_record):
id_struct = {
"type": dreamhost_record["type"],
"name": dreamhost_record["record"],
"content": dreamhost_record["value"],
}
return base64.urlsafe_b64encode(json.dumps(id_struct).encode("utf-8")).decode(
"utf-8"
)
# The information in identifier follows the record in lexicon.
# Provider._record_to_dreamhost_record transfers to dreamhost-based record.
@staticmethod
def _id_to_dreamhost_record(identifier):
record = json.loads(
base64.urlsafe_b64decode(identifier.encode("utf-8")).decode("utf-8")
)
dreamhost_record = Provider._record_to_dreamhost_record(record)
return dreamhost_record
# The information in identifier follows the record in lexicon.
# 'id' is added in the record.
@staticmethod
def _id_to_record(identifier):
record = json.loads(
base64.urlsafe_b64decode(identifier.encode("utf-8")).decode("utf-8")
)
record["id"] = identifier
return record
# Transferring lexicon-based record to Dreamhost-based record.
@staticmethod
def _record_to_dreamhost_record(record):
dreamhost_record = {
"type": record["type"],
"record": record["name"],
"value": record["content"],
}
return dreamhost_record
def _authenticate(self):
self.domain_id = None
payload = self._get("dns-list_records")
data = payload.get("data", None)
if data is None:
raise AuthenticationError("Domain not found")
for record in data:
if record.get("record", "") == self.domain and record.get("type", "") in ["A", "AAAA"]:
self.domain_id = self.domain
break
if self.domain_id is None:
raise AuthenticationError("Domain not found")
def _create_record(self, rtype, name, content):
name = self._full_name(name)
try:
self._get(
"dns-add_record",
query_params={"record": name, "type": rtype, "value": content},
)
except AlreadyExistError:
pass
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def _list_records(self, rtype=None, name=None, content=None):
payload = self._get("dns-list_records")
resource_list = payload.get("data", None)
if not isinstance(resource_list, list):
raise Exception(f"unable to get records: {payload}")
resource_list = [
resource for resource in resource_list if resource["zone"] == self.domain
]
if rtype:
resource_list = [
resource for resource in resource_list if resource["type"] == rtype
]
if name:
name = self._full_name(name)
resource_list = [
resource for resource in resource_list if resource["record"] == name
]
if content:
resource_list = [
resource for resource in resource_list if resource["value"] == content
]
processed_records = []
for dreamhost_record in resource_list:
processed_records.append(
{
"id": Provider._identifier(dreamhost_record),
"type": dreamhost_record["type"],
"name": dreamhost_record["record"],
"content": dreamhost_record["value"],
}
)
return processed_records
# Create or update a record.
def _update_record(self, identifier, rtype=None, name=None, content=None):
if identifier:
try:
self._delete_record(identifier)
except NonExistError:
pass
return self._create_record(rtype=rtype, name=name, content=content)
# Delete existing records.
# If record does not exist, do nothing.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
to_deletes = []
if identifier:
record = Provider._id_to_record(identifier)
to_deletes.append(record)
else:
records = self._list_records(rtype=rtype, name=name, content=content)
to_deletes = records
# for-loop to delete deletes.
err = None
for each in to_deletes:
try:
dreamhost_record = Provider._record_to_dreamhost_record(each)
self._get("dns-remove_record", query_params=dreamhost_record)
except Exception as exception:
err = exception
# Sleeping for 1-second to avoid trigerring ddos protecting in case of looped requests
time.sleep(1)
if err is not None:
raise err
return True
# Helpers
def _request(self, action="GET", url="", data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
default_headers = {
"Accept": "application/json",
"Content-Type": "application/json",
}
query_params["key"] = self._get_provider_option("auth_token")
query_params["format"] = "json"
if "cmd" not in query_params:
query_params["cmd"] = url
response = requests.request(
action,
self.api_endpoint,
params=query_params,
data=json.dumps(data),
headers=default_headers,
)
# if the request fails for any reason, throw an error.
response.raise_for_status()
result = response.json()
if result.get("result", "") != "success":
err_msg = result.get("data", "")
if err_msg in _DATA_NON_EXIST_ERROR_LIST:
raise NonExistError(f"Dreamhost non-exist error: {result}")
if err_msg in _DATA_ALREADY_EXIST_ERROR_LIST:
raise AlreadyExistError(f"Dreamhost already-exist error: {result}")
raise Exception(f"Dreamhost api error: {result}")
return result
<|code_end|>
|
Plesk provider not working: invalid request (error code 1014)
The Plesk provider seems to be not working at all:
```
lexicon plesk --log_level DEBUG --auth-username apiuser --auth-password secret --plesk-server "https://plesk-api-host:8443" list mydomain.com A
```
leads to:
```
Arguments: Namespace(delegated=None, config_dir='/home/spike/ansible', provider_name='plesk', action='list', domain='mydomain.com', type='A', name=None, content=None, ttl=None, priority=None, identifier=None, log_level='DEBUG', output='TABLE', auth_username='apiuser', auth_password='secret', plesk_server='https://plesk-api-host:8443')
unable to cache publicsuffix.org-tlds.{'urls': ('https://publicsuffix.org/list/public_suffix_list.dat', 'https://raw.githubusercontent.com/publicsuffix/list/master/public_suffix_list.dat'), 'fallback_to_snapshot': True} in /home/spike/.lexicon_tld_set/publicsuffix.org-tlds/de84b5ca2167d4c83e38fb162f2e8738.tldextract.json. This could refresh the Public Suffix List over HTTP every app startup. Construct your `TLDExtract` with a writable `cache_dir` or set `cache_dir=False` to silence this warning. [Errno 20] Not a directory: '/home/spike/.lexicon_tld_set/publicsuffix.org-tlds'
Starting new HTTPS connection (1): publicsuffix.org:443
https://publicsuffix.org:443 "GET /list/public_suffix_list.dat HTTP/1.1" 200 None
Request: <?xml version="1.0" encoding="utf-8"?>
<packet><site><get><filter><name>mydomain.com</name><dataset /></filter></get></site></packet>
Starting new HTTPS connection (1): plesk-api-host:8443
https://plesk-api-host:8443 "POST /enterprise/control/agent.php HTTP/1.1" 200 None
Response: <?xml version="1.0"?>
<packet version="1.6.9.1">
<system>
<status>error</status>
<errcode>1014</errcode>
<errtext>Parser error: Request is invalid. Error in line 2: Element 'dataset': This element is not expected. Expected is ( name ). Error in line 2: Element 'get': Missing child element(s). Expected is ( dataset ).</errtext>
</system> </packet>
Traceback (most recent call last):
File "/home/spike/.local/bin/lexicon", line 8, in <module>
sys.exit(main())
File "/home/spike/.local/lib/python3.9/site-packages/lexicon/cli.py", line 131, in main
results = client.execute()
File "/home/spike/.local/lib/python3.9/site-packages/lexicon/client.py", line 79, in execute
self.provider.authenticate()
File "/home/spike/.local/lib/python3.9/site-packages/lexicon/providers/base.py", line 73, in authenticate
self._authenticate()
File "/home/spike/.local/lib/python3.9/site-packages/lexicon/providers/plesk.py", line 115, in _authenticate
self.domain_id = self.__find_site()
File "/home/spike/.local/lib/python3.9/site-packages/lexicon/providers/plesk.py", line 110, in __find_site
return self.__simple_request(
File "/home/spike/.local/lib/python3.9/site-packages/lexicon/providers/plesk.py", line 63, in __simple_request
response = self.__plesk_request({rtype: {operation: req}})[rtype][operation]
KeyError: 'site'
```
Other actions are affected similarly as well.
This seems to be because the request:
```
<?xml version="1.0" encoding="utf-8"?>
<packet><site><get><filter><name>mydomain.com</name><dataset /></filter></get></site></packet>
```
is not valid according to the api documentation: https://docs.plesk.com/en-US/obsidian/api-rpc/about-xml-api/reference/managing-sites-domains/getting-information-about-sites.66583/ (`dataset` is not a valid child of the `filter` node, it needs to be a child of the `get` node)
| lexicon/providers/plesk.py
<|code_start|>"""
Lexicon Plesk Provider
Author: Jens Reimann, 2018
API Docs: https://docs.plesk.com/en-US/onyx/api-rpc
"""
import logging
from collections import defaultdict
from typing import Dict, List, Optional
from xml.etree import cElementTree
from xml.etree.ElementTree import Element
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
PLEX_URL_SUFFIX = "/enterprise/control/agent.php"
NAMESERVER_DOMAINS: List[str] = []
def provider_parser(subparser):
"""Configure provider parser for Plesk"""
subparser.add_argument(
"--auth-username", help="specify username for authentication"
)
subparser.add_argument(
"--auth-password", help="specify password for authentication"
)
subparser.add_argument(
"--plesk-server", help="specify URL to the Plesk Web UI, including the port"
)
class Provider(BaseProvider):
"""Provider class for Plesk"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.api_endpoint = self._get_provider_option("plesk_server")
if self.api_endpoint.endswith("/"):
self.api_endpoint = self.api_endpoint[:-1]
if not self.api_endpoint.endswith(PLEX_URL_SUFFIX):
self.api_endpoint += PLEX_URL_SUFFIX
self.domain_id = None
self.username = self._get_provider_option("auth_username")
assert self.username is not None
self.password = self._get_provider_option("auth_password")
assert self.password is not None
def __simple_request(self, rtype, operation, req):
response = self.__plesk_request({rtype: {operation: req}})[rtype][operation]
result = response["result"]
if isinstance(result, list):
for record in result:
if record["status"] == "error":
raise Exception(
f"API returned at least one error: {record['errtext']}"
)
elif response["result"]["status"] == "error":
errcode = response["result"]["errcode"]
errtext = response["result"]["errtext"]
raise Exception(f"API returned error: {errcode} ({errtext})")
return response
def __plesk_request(self, request):
headers = {
"Content-type": "text/xml",
"HTTP_PRETTY_PRINT": "TRUE",
"HTTP_AUTH_LOGIN": self.username,
"HTTP_AUTH_PASSWD": self.password,
}
xml = f"""\
<?xml version="1.0" encoding="utf-8"?>
{cElementTree.tostring(_dict_to_etree({"packet": request}), encoding="unicode")}\
"""
LOGGER.debug("Request: %s", xml)
response = requests.post(
self.api_endpoint,
headers=headers,
data=xml,
auth=(self.username, self.password),
)
data = response.text
LOGGER.debug("Response: %s", data)
result = _etree_to_dict(cElementTree.XML(data))
return result["packet"]
def __find_site(self):
return self.__simple_request(
"site", "get", {"filter": {"name": self.domain, "dataset": {}}}
)["result"]["id"]
def _authenticate(self):
self.domain_id = self.__find_site()
if self.domain_id is None:
raise AuthenticationError("Domain not found")
def _create_record(self, rtype, name, content):
return self.__create_entry(rtype, name, content, None)
def _list_records(self, rtype=None, name=None, content=None):
entries = self.__find_dns_entries(rtype, name, content)
LOGGER.debug("list_records: %s", entries)
return entries
def _update_record(self, identifier, rtype=None, name=None, content=None):
if identifier is None:
entries = self.__find_dns_entries(rtype, name, None)
LOGGER.debug("Entries found: %s", entries)
if not entries:
raise Exception("No entry found for updating")
identifier = entries[0]["id"]
entry = self.__get_dns_entry(identifier)
ids = []
for an_entry in entries:
ids.append(an_entry["id"])
self.__delete_dns_records_by_id(ids)
else:
entry = self.__get_dns_entry(identifier)
self.__delete_dns_records_by_id([identifier])
assert entry is not None
LOGGER.debug("Updating: %s", entry)
if rtype:
entry["type"] = rtype
if name:
entry["host"] = name
if content:
entry["value"] = content
return self.__create_entry(
entry["type"], entry["host"], entry["value"], entry["opt"]
)
def __create_entry(self, rtype, host, value, opt):
entries = self.__find_dns_entries(rtype, self._fqdn_name(host), value)
if entries:
return True # already exists
self.__simple_request(
"dns",
"add_rec",
{
"site-id": self.domain_id,
"type": rtype,
"host": self._relative_name(host),
"value": value,
"opt": opt,
},
)
return True
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
if identifier:
self.__delete_dns_records_by_id([identifier])
return True
entries = self.__find_dns_entries(rtype, self._fqdn_name(name), content)
ids = []
for entry in entries:
ids.append(entry["id"])
self.__delete_dns_records_by_id(ids)
return bool(ids)
def __get_dns_entry(self, identifier):
return self.__simple_request("dns", "get_rec", {"filter": {"id": identifier}})[
"result"
]["data"]
def __find_dns_entries(self, rtype=None, host=None, value=None):
LOGGER.debug("Searching for: %s, %s, %s", rtype, host, value)
if value and rtype and rtype in ["CNAME"]:
LOGGER.debug("CNAME transformation")
value = value.rstrip(".") + "."
if host:
host = self._fqdn_name(host)
result = self.__simple_request(
"dns", "get_rec", {"filter": {"site-id": self.domain_id}}
)
entries = []
for record in result["result"]:
LOGGER.debug("Record: %s", record)
if (rtype is not None) and (record["data"]["type"] != rtype):
LOGGER.debug(
"\tType doesn't match - expected: '%s', found: '%s'",
rtype,
record["data"]["type"],
)
continue
if (host is not None) and (record["data"]["host"] != host):
LOGGER.debug(
"\tHost doesn't match - expected: '%s', found: '%s'",
host,
record["data"]["host"],
)
continue
if (value is not None) and (record["data"]["value"] != value):
LOGGER.debug(
"\tValue doesn't match - expected: '%s', found: '%s'",
value,
record["data"]["value"],
)
continue
entry = {
"id": record["id"],
"type": record["data"]["type"],
"name": self._full_name(record["data"]["host"]),
"ttl": None,
"options": {},
}
if record["data"]["type"] in ["CNAME"]:
entry["content"] = record["data"]["value"].rstrip(".")
else:
entry["content"] = record["data"]["value"]
if record["data"]["type"] == "MX":
entry["options"]["mx"] = {"priority": int(record["data"]["opt"])}
entries.append(entry)
return entries
def __delete_dns_records_by_id(self, ids):
if not ids:
return
req = []
for i in ids:
req.append({"del_rec": {"filter": {"id": i}}})
self.__plesk_request({"dns": req})
def _request(self, action="GET", url="/", data=None, query_params=None):
# Helper _request is not used for Plesk provider
pass
def _etree_to_dict(t: Element) -> Optional[Dict]:
d: Optional[Dict] = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(_etree_to_dict, children):
if dc:
for k, v in dc.items():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
if t.attrib and d:
d[t.tag].update(("@" + k, v) for k, v in t.attrib.items())
if t.text and d:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]["#text"] = text
else:
d[t.tag] = text
return d
def _dict_to_etree(d: Dict) -> Element:
def _to_etree(d1, root):
if not d1:
pass
elif isinstance(d1, str):
root.text = d1
elif isinstance(d1, dict):
for k, v in d1.items():
assert isinstance(k, str)
if k.startswith("#"):
assert k == "#text" and isinstance(v, str)
root.text = v
elif k.startswith("@"):
assert isinstance(v, str)
root.set(k[1:], v)
elif isinstance(v, list):
for e in v:
_to_etree(e, cElementTree.SubElement(root, k))
else:
_to_etree(v, cElementTree.SubElement(root, k))
else:
raise TypeError("invalid type: " + str(type(d1)))
assert isinstance(d, dict) and len(d) == 1
tag, body = next(iter(d.items()))
node = cElementTree.Element(tag)
_to_etree(body, node)
return node
<|code_end|>
| lexicon/providers/plesk.py
<|code_start|>"""
Lexicon Plesk Provider
Author: Jens Reimann, 2018
API Docs: https://docs.plesk.com/en-US/onyx/api-rpc
"""
import logging
from collections import defaultdict
from typing import Dict, List, Optional
from xml.etree import cElementTree
from xml.etree.ElementTree import Element
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
PLEX_URL_SUFFIX = "/enterprise/control/agent.php"
NAMESERVER_DOMAINS: List[str] = []
def provider_parser(subparser):
"""Configure provider parser for Plesk"""
subparser.add_argument(
"--auth-username", help="specify username for authentication"
)
subparser.add_argument(
"--auth-password", help="specify password for authentication"
)
subparser.add_argument(
"--plesk-server", help="specify URL to the Plesk Web UI, including the port"
)
class Provider(BaseProvider):
    """Provider class for Plesk.

    Talks to the Plesk XML-RPC agent endpoint; every call is an XML
    "packet" POSTed to the agent and the XML reply is converted back to
    nested dicts via _etree_to_dict/_dict_to_etree.
    """

    def __init__(self, config):
        """Normalize the configured server URL and read the credentials."""
        super(Provider, self).__init__(config)
        self.api_endpoint = self._get_provider_option("plesk_server")
        # Strip a trailing slash, then make sure the agent path is appended
        # exactly once.
        if self.api_endpoint.endswith("/"):
            self.api_endpoint = self.api_endpoint[:-1]
        if not self.api_endpoint.endswith(PLEX_URL_SUFFIX):
            self.api_endpoint += PLEX_URL_SUFFIX
        self.domain_id = None
        self.username = self._get_provider_option("auth_username")
        assert self.username is not None
        self.password = self._get_provider_option("auth_password")
        assert self.password is not None

    def __simple_request(self, rtype, operation, req):
        """Send one {rtype: {operation: req}} packet and raise on API errors.

        The reply's "result" can be a single dict or a list of dicts
        (one per affected record); both shapes are checked for errors.
        """
        response = self.__plesk_request({rtype: {operation: req}})[rtype][operation]
        result = response["result"]
        if isinstance(result, list):
            for record in result:
                if record["status"] == "error":
                    raise Exception(
                        f"API returned at least one error: {record['errtext']}"
                    )
        # NOTE: response["result"] here is the same dict as `result` above.
        elif response["result"]["status"] == "error":
            errcode = response["result"]["errcode"]
            errtext = response["result"]["errtext"]
            raise Exception(f"API returned error: {errcode} ({errtext})")
        return response

    def __plesk_request(self, request):
        """POST a raw packet dict as XML and return the parsed "packet" reply."""
        headers = {
            "Content-type": "text/xml",
            "HTTP_PRETTY_PRINT": "TRUE",
            "HTTP_AUTH_LOGIN": self.username,
            "HTTP_AUTH_PASSWD": self.password,
        }
        xml = f"""\
<?xml version="1.0" encoding="utf-8"?>
{cElementTree.tostring(_dict_to_etree({"packet": request}), encoding="unicode")}\
"""
        LOGGER.debug("Request: %s", xml)
        response = requests.post(
            self.api_endpoint,
            headers=headers,
            data=xml,
            auth=(self.username, self.password),
        )
        data = response.text
        LOGGER.debug("Response: %s", data)
        result = _etree_to_dict(cElementTree.XML(data))
        return result["packet"]

    def __find_site(self):
        """Return the Plesk site id for the configured domain."""
        return self.__simple_request(
            "site", "get", {"filter": {"name": self.domain}, "dataset": {}}
        )["result"]["id"]

    def _authenticate(self):
        """Resolve the domain into a site id; fail if it is unknown."""
        self.domain_id = self.__find_site()
        if self.domain_id is None:
            raise AuthenticationError("Domain not found")

    def _create_record(self, rtype, name, content):
        """Create a record with no extra options (opt=None)."""
        return self.__create_entry(rtype, name, content, None)

    def _list_records(self, rtype=None, name=None, content=None):
        """List DNS records, optionally filtered by type/name/content."""
        entries = self.__find_dns_entries(rtype, name, content)
        LOGGER.debug("list_records: %s", entries)
        return entries

    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update a record by delete-then-recreate.

        Without an identifier, all entries matching rtype+name are
        removed and the first one is used as the template for the new
        entry.
        """
        if identifier is None:
            entries = self.__find_dns_entries(rtype, name, None)
            LOGGER.debug("Entries found: %s", entries)
            if not entries:
                raise Exception("No entry found for updating")
            identifier = entries[0]["id"]
            entry = self.__get_dns_entry(identifier)
            ids = []
            for an_entry in entries:
                ids.append(an_entry["id"])
            self.__delete_dns_records_by_id(ids)
        else:
            entry = self.__get_dns_entry(identifier)
            self.__delete_dns_records_by_id([identifier])
        assert entry is not None
        LOGGER.debug("Updating: %s", entry)
        # Only overwrite the fields the caller actually provided.
        if rtype:
            entry["type"] = rtype
        if name:
            entry["host"] = name
        if content:
            entry["value"] = content
        return self.__create_entry(
            entry["type"], entry["host"], entry["value"], entry["opt"]
        )

    def __create_entry(self, rtype, host, value, opt):
        """Add a record unless an identical one already exists (idempotent)."""
        entries = self.__find_dns_entries(rtype, self._fqdn_name(host), value)
        if entries:
            return True  # already exists
        self.__simple_request(
            "dns",
            "add_rec",
            {
                "site-id": self.domain_id,
                "type": rtype,
                "host": self._relative_name(host),
                "value": value,
                "opt": opt,
            },
        )
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete by id, or every record matching the given filters."""
        if identifier:
            self.__delete_dns_records_by_id([identifier])
            return True
        entries = self.__find_dns_entries(rtype, self._fqdn_name(name), content)
        ids = []
        for entry in entries:
            ids.append(entry["id"])
        self.__delete_dns_records_by_id(ids)
        return bool(ids)

    def __get_dns_entry(self, identifier):
        """Fetch one raw DNS record dict by its Plesk record id."""
        return self.__simple_request("dns", "get_rec", {"filter": {"id": identifier}})[
            "result"
        ]["data"]

    def __find_dns_entries(self, rtype=None, host=None, value=None):
        """Fetch all records for the site and filter them client-side."""
        LOGGER.debug("Searching for: %s, %s, %s", rtype, host, value)
        # Plesk stores CNAME targets with a trailing dot; match that form.
        if value and rtype and rtype in ["CNAME"]:
            LOGGER.debug("CNAME transformation")
            value = value.rstrip(".") + "."
        if host:
            host = self._fqdn_name(host)
        result = self.__simple_request(
            "dns", "get_rec", {"filter": {"site-id": self.domain_id}}
        )
        entries = []
        for record in result["result"]:
            LOGGER.debug("Record: %s", record)
            if (rtype is not None) and (record["data"]["type"] != rtype):
                LOGGER.debug(
                    "\tType doesn't match - expected: '%s', found: '%s'",
                    rtype,
                    record["data"]["type"],
                )
                continue
            if (host is not None) and (record["data"]["host"] != host):
                LOGGER.debug(
                    "\tHost doesn't match - expected: '%s', found: '%s'",
                    host,
                    record["data"]["host"],
                )
                continue
            if (value is not None) and (record["data"]["value"] != value):
                LOGGER.debug(
                    "\tValue doesn't match - expected: '%s', found: '%s'",
                    value,
                    record["data"]["value"],
                )
                continue
            entry = {
                "id": record["id"],
                "type": record["data"]["type"],
                "name": self._full_name(record["data"]["host"]),
                "ttl": None,
                "options": {},
            }
            # Strip the trailing dot back off CNAME targets for callers.
            if record["data"]["type"] in ["CNAME"]:
                entry["content"] = record["data"]["value"].rstrip(".")
            else:
                entry["content"] = record["data"]["value"]
            if record["data"]["type"] == "MX":
                entry["options"]["mx"] = {"priority": int(record["data"]["opt"])}
            entries.append(entry)
        return entries

    def __delete_dns_records_by_id(self, ids):
        """Batch-delete records; a single packet carries all del_rec ops."""
        if not ids:
            return
        req = []
        for i in ids:
            req.append({"del_rec": {"filter": {"id": i}}})
        self.__plesk_request({"dns": req})

    def _request(self, action="GET", url="/", data=None, query_params=None):
        # Helper _request is not used for Plesk provider
        pass
def _etree_to_dict(t: Element) -> Optional[Dict]:
d: Optional[Dict] = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(_etree_to_dict, children):
if dc:
for k, v in dc.items():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
if t.attrib and d:
d[t.tag].update(("@" + k, v) for k, v in t.attrib.items())
if t.text and d:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]["#text"] = text
else:
d[t.tag] = text
return d
def _dict_to_etree(d: Dict) -> Element:
def _to_etree(d1, root):
if not d1:
pass
elif isinstance(d1, str):
root.text = d1
elif isinstance(d1, dict):
for k, v in d1.items():
assert isinstance(k, str)
if k.startswith("#"):
assert k == "#text" and isinstance(v, str)
root.text = v
elif k.startswith("@"):
assert isinstance(v, str)
root.set(k[1:], v)
elif isinstance(v, list):
for e in v:
_to_etree(e, cElementTree.SubElement(root, k))
else:
_to_etree(v, cElementTree.SubElement(root, k))
else:
raise TypeError("invalid type: " + str(type(d1)))
assert isinstance(d, dict) and len(d) == 1
tag, body = next(iter(d.items()))
node = cElementTree.Element(tag)
_to_etree(body, node)
return node
<|code_end|>
|
joker: HTTP 414 client error when adding a new record
Hi,
I get a "414 Client error" when trying to add a new record to one of my zones hosted at Joker. Is there an easy fix or is that a limitation of their API?
```
lexicon joker create example.com A --name a --content 127.0.0.1
Traceback (most recent call last):
File "/home/nextgens/src/lexicon/env/bin/lexicon", line 10, in <module>
sys.exit(main())
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/cli.py", line 131, in main
results = client.execute()
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/client.py", line 88, in execute
return self.provider.create_record(record_type, name, content)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 79, in create_record
return self._create_record(rtype, name, content)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 165, in _create_record
self._apply_zonedata(zonedata)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 272, in _apply_zonedata
query_params={"domain": self.domain_id, "zone": "\n".join(data)},
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 173, in _get
return self._request("GET", url, query_params=query_params)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 97, in _request
return _process_response(response)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 354, in _process_response
response.raise_for_status()
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/requests/models.py", line 953, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 414 Client Error: Request URI too long for url: https://dmapi.joker.com/request/dns-zone-put?...
```
joker: HTTP 414 client error when adding a new record
Hi,
I get a "414 Client error" when trying to add a new record to one of my zones hosted at Joker. Is there an easy fix or is that a limitation of their API?
```
lexicon joker create example.com A --name a --content 127.0.0.1
Traceback (most recent call last):
File "/home/nextgens/src/lexicon/env/bin/lexicon", line 10, in <module>
sys.exit(main())
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/cli.py", line 131, in main
results = client.execute()
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/client.py", line 88, in execute
return self.provider.create_record(record_type, name, content)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 79, in create_record
return self._create_record(rtype, name, content)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 165, in _create_record
self._apply_zonedata(zonedata)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 272, in _apply_zonedata
query_params={"domain": self.domain_id, "zone": "\n".join(data)},
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/base.py", line 173, in _get
return self._request("GET", url, query_params=query_params)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 97, in _request
return _process_response(response)
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/lexicon/providers/joker.py", line 354, in _process_response
response.raise_for_status()
File "/home/nextgens/src/lexicon/env/lib/python3.7/site-packages/requests/models.py", line 953, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 414 Client Error: Request URI too long for url: https://dmapi.joker.com/request/dns-zone-put?...
```
| lexicon/providers/joker.py
<|code_start|>"""Module provider for Joker.com"""
# The Joker.com API is well documented.
# Several specificities compared to classic REST API in other providers:
# - everything is done with GET requests: all parameters (including actual records data
# and authentication data) are passed in the URLs,
# - DNS zones are represented in a "strongly" formatted flat format (not JSON or XML) that may
# remind a zone definition in bind9,
# - every operation requires to pass the entire updated zone through the API, so care must be
# taken to not alter unexpectedly other entries during the create/update/delete operations,
# - all headers and data are contained in the response body; then this kind of body is composed
# of several lines of type key: value containing the headers (including errors), then a blank
# line makes the separation with the data itself (see _process_response for the body parsing).
import binascii
import json
import logging
import re
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["ns.joker.com"]
API_BASE_URL = "https://dmapi.joker.com/request"
DNSZONE_ENTRY_PATTERN = re.compile(
r"^(.+?)\s+(\w+)\s+(.+?)\s+((?:\\\"(?:.+?)\\\")|(?:.+?))\s+(\d+)(?:\s+(\d+)\s+(\d+)\s+(.+?)|\s+(.+?)|)$"
)
def provider_parser(subparser):
    """Generate a subparser for Joker"""
    subparser.description = """
The Joker.com provider requires a valid token for authentication.
You can create one in the section 'Manage Joker.com API access keys' of 'My Profile' in your Joker.com account.
"""
    token_help = "specify the API Key to connect to the Joker.com API"
    subparser.add_argument("--auth-token", help=token_help)
class _Response:
def __init__(self, headers, data):
self.headers = headers
self.data = data
def __str__(self):
return json.dumps({"headers": self.headers, "data": self.data})
class Provider(BaseProvider):
    """Joker.com DMAPI provider.

    Every operation downloads the whole zone, edits it in memory, and
    uploads the whole zone back (dns-zone-get / dns-zone-put).

    NOTE(review): all calls here are plain GETs with the payload in the
    query string (see _request/_apply_zonedata) - large zones can exceed
    server URL length limits and trigger HTTP 414; confirm against the
    DMAPI before relying on this for big zones.
    """

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        # DMAPI session id, filled in by _authenticate.
        self._session_id = None

    def _authenticate(self):
        """Open a DMAPI session and verify the domain belongs to the account.

        Stores the session id from the Auth-Sid reply header, then checks
        the domain is listed with an acceptable status.
        """
        auth_token = self._get_provider_option("auth_token")
        response = requests.get(API_BASE_URL + "/login", params={"api-key": auth_token})
        result = _process_response(response)
        self._session_id = result.headers["Auth-Sid"]
        response = self._get(
            "/query-domain-list", query_params={"pattern": self.domain, "showstatus": 1}
        )
        if not response.data:
            raise AuthenticationError(
                f"Domain {self.domain} is not registered with this account."
            )
        data = response.data[0]
        # Line format: "<domain> <expiry> <status[,status...]>"
        items = data.split(" ")
        domain_status = items[2].split(",")
        # Any status outside this allow-list blocks the provider.
        if len(set(domain_status).difference(["production", "lock", "autorenew"])) > 0:
            raise AuthenticationError(
                f"Current status for domain {self.domain} is: {items[2]}"
            )
        self.domain_id = self.domain

    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Issue a DMAPI call; `action` and `data` are ignored, everything
        (including auth-sid) is passed in the query string of a GET."""
        if not query_params:
            query_params = {}
        query_params["auth-sid"] = self._session_id
        response = requests.get(API_BASE_URL + url, params=query_params)
        return _process_response(response)

    def _list_records(self, rtype=None, name=None, content=None):
        """Download the zone and return parsed entries matching the filters."""
        response = self._get("/dns-zone-get", query_params={"domain": self.domain_id})
        zone_data = _extract_zonedata(response.data)
        # Drop "raw" pass-through lines (type is None for those).
        zone_data = [entry for entry in zone_data if entry["type"]]
        if rtype:
            zone_data = [entry for entry in zone_data if entry["type"] == rtype]
        if name:
            zone_data = [
                entry
                for entry in zone_data
                if self._full_name(entry["label"]) == self._full_name(name)
            ]
        if content:
            zone_data = [
                entry for entry in zone_data if entry["target"] == content.strip()
            ]
        records = [
            {
                "type": entry["type"],
                "name": self._full_name(entry["label"]),
                "ttl": entry["ttl"],
                "content": entry["target"],
                "id": self._identifier(entry),
            }
            for entry in zone_data
        ]
        LOGGER.debug("list_records: %s", records)
        return records

    def _create_record(self, rtype, name, content):
        """Append a new entry to the zone; duplicates are silently skipped."""
        if not rtype or not name or not content:
            raise Exception(
                "Error, rtype, name and content are mandatory to create a record."
            )
        response = self._get("/dns-zone-get", query_params={"domain": self.domain_id})
        zonedata = _extract_zonedata(response.data)
        new_entry = {
            "label": self._relative_name(name),
            "type": rtype,
            "pri": 0,
            "target": content,
            "ttl": self._get_lexicon_option("ttl"),
            "valid-from": None,
            "valid-to": None,
            "parameters": None,
        }
        # Identifier is a hash of type+name+content, so this is idempotent.
        if any(
            entry
            for entry in zonedata
            if self._identifier(new_entry) == self._identifier(entry)
        ):
            LOGGER.debug(
                "create_record (ignored, duplicate): %s", self._identifier(new_entry)
            )
            return True
        zonedata.append(new_entry)
        self._apply_zonedata(zonedata)
        return True

    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Modify exactly one entry in place and re-upload the zone."""
        if not identifier and not (rtype and name):
            raise Exception(
                "Error, either identifier or rtype + name are mandatory to update a record."
            )
        response = self._get("/dns-zone-get", query_params={"domain": self.domain_id})
        zonedata = _extract_zonedata(response.data)
        selector_info = (
            f"identifier={identifier}" if identifier else f"type={rtype},name={name}"
        )
        if identifier:
            to_update = [
                entry for entry in zonedata if self._identifier(entry) == identifier
            ]
        else:
            to_update = [
                entry
                for entry in zonedata
                if entry["type"] == rtype
                and self._full_name(entry["label"]) == self._full_name(name)
            ]
        if not to_update:
            raise Exception(f"Error, could not find a record for {selector_info}.")
        if len(to_update) > 1:
            raise Exception(
                f"Error, found more than one record for {selector_info}. "
                "Please use an identifier to select one explicitly."
            )
        # NOTE(review): zone entries carry "target", not "content" - the
        # fallback to_update[0]["content"] below likely raises KeyError
        # when `content` is falsy; confirm intended behavior.
        to_update[0].update(
            {
                "type": rtype if rtype else to_update[0]["type"],
                "label": self._relative_name(name) if name else to_update[0]["label"],
                "target": content if content else to_update[0]["content"],
                "ttl": self._get_lexicon_option("ttl"),
            }
        )
        self._apply_zonedata(zonedata)
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Drop matching entries from the zone and re-upload it."""
        if not identifier and not rtype:
            raise Exception(
                "Error, either rtype or identifier are mandatory to delete a record."
            )
        response = self._get("/dns-zone-get", query_params={"domain": self.domain_id})
        zonedata = _extract_zonedata(response.data)
        if identifier:
            zonedata = [
                entry for entry in zonedata if self._identifier(entry) != identifier
            ]
        else:
            # Keep raw lines (type None) and entries whose hash differs
            # from the synthesized selector entry.
            zonedata = [
                entry
                for entry in zonedata
                if entry["type"] is None
                or self._identifier(entry)
                != self._identifier(
                    {
                        "type": rtype,
                        "label": name if name else entry["label"],
                        "target": content if content else entry["target"],
                    }
                )
            ]
        self._apply_zonedata(zonedata)
        return True

    def _apply_zonedata(self, zonedata):
        """Serialize entries back to DMAPI zone-file lines and upload them."""
        data = []
        for entry in zonedata:
            if "raw" in entry:
                data.append(entry["raw"])
            else:
                # TXT entries always require content to be quoted
                target = (
                    f'"{entry["target"]}"'
                    if entry["type"] == "TXT"
                    else entry["target"]
                )
                line = f"{entry['label']} {entry['type']} {entry['pri']} {target} {entry['ttl']}"
                if entry["valid-from"] is not None and entry["valid-to"] is not None:
                    line = f"{line} {entry['valid-from']} {entry['valid-to']}"
                if entry["parameters"] is not None:
                    line = f"{line} {entry['parameters']}"
                data.append(line)
        # NOTE(review): the entire zone goes in the GET query string here.
        self._get(
            "/dns-zone-put",
            query_params={"domain": self.domain_id, "zone": "\n".join(data)},
        )

    def _identifier(self, record):
        """Stable 7-hex-char id: SHA256 over type, full name and target."""
        if "raw" in record:
            return None
        digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
        digest.update(("type=" + record.get("type", "") + ",").encode("utf-8"))
        digest.update(
            ("name=" + self._full_name(record.get("label", "")) + ",").encode("utf-8")
        )
        digest.update(("content=" + record.get("target", "") + ",").encode("utf-8"))
        return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7]

    def _relative_name(self, record_name):
        # The zone apex is written as "@" in DMAPI zone files.
        if record_name == self.domain_id:
            return "@"
        return super(Provider, self)._relative_name(record_name)

    def _full_name(self, record_name):
        # Expand the "@" apex marker back to the domain name.
        if record_name == "@":
            return self.domain_id
        return super(Provider, self)._full_name(record_name)
def _extract_zonedata(data):
    """Parse raw DMAPI zone lines into structured entry dicts.

    Lines matching DNSZONE_ENTRY_PATTERN become dicts with label/type/
    pri/target/ttl (plus parameters and an optional validity window);
    anything else is kept verbatim as {"raw": ..., "type": None} so the
    zone can be written back without losing unknown lines.
    """
    processed = []
    for entry in data:
        match = DNSZONE_ENTRY_PATTERN.match(entry)
        if match:
            extracted = {
                "label": match.group(1),
                "type": match.group(2),
                "pri": match.group(3),
                "target": match.group(4),
                "ttl": int(match.group(5)),
            }
            # NOTE(review): groups 8/9 exist in the pattern, so
            # match.group(9) returns None for a non-participating group
            # rather than raising IndexError - the fallback branch below
            # looks unreachable; confirm whether valid-from/valid-to are
            # ever meant to be populated.
            try:
                extracted.update(
                    {
                        "parameters": match.group(9),
                        "valid-from": None,
                        "valid-to": None,
                    }
                )
            except IndexError:
                try:
                    extracted.update(
                        {
                            "parameters": match.group(8),
                            "valid-from": int(match.group(7)),
                            "valid-to": int(match.group(6)),
                        }
                    )
                except IndexError:
                    pass
            if extracted["type"] == "TXT":
                # TXT targets are stored quoted in the zone; strip quotes.
                extracted["target"] = re.sub(
                    r'"(.*)"', r"\1", extracted["target"]
                ).strip()
            processed.append(extracted)
        else:
            # Unrecognized line (SOA, comments, ...): pass through untouched.
            processed.append(
                {
                    "raw": entry,
                    "type": None,
                }
            )
    return processed
def _process_response(response):
    """Parse a Joker DMAPI reply into a _Response, raising on API errors.

    The DMAPI body starts with "Key: value" header lines, then a blank
    line, then the data lines. A non-zero Status-Code header is turned
    into an HTTPError carrying the parsed response.

    :raises requests.exceptions.HTTPError: on HTTP errors or a non-zero
        DMAPI Status-Code.
    """
    response.raise_for_status()
    headers = {}
    body = []
    feed_headers = True
    for line in response.text.split("\n"):
        if not line:
            # First blank line separates the header block from the data.
            feed_headers = False
            continue
        if feed_headers:
            # Split on the first colon only: header values may themselves
            # contain ':' (plain split(":") would truncate them and would
            # crash on a line with no colon at all).
            key, _, value = line.partition(":")
            headers[key] = value.lstrip()
        else:
            body.append(line)
    processed_response = _Response(headers, body)
    if headers["Status-Code"] != "0":
        if body and body[0] == "API key is invalid":
            raise requests.exceptions.HTTPError(
                f"{headers['Status-Code']} Error: API key is invalid for url: {response.url}",
                response=processed_response,
            )
        raise requests.exceptions.HTTPError(
            f"{headers['Status-Code']} Error: {headers['Status-Text']} ({headers['Error']}) "
            f"for url: {response.url}",
            response=processed_response,
        )
    return processed_response
<|code_end|>
| lexicon/providers/joker.py
<|code_start|>"""Module provider for Joker.com"""
# The Joker.com API is well documented.
# Several specificities compared to classic REST API in other providers:
# - everything is done with GET requests: all parameters (including actual records data
# and authentication data) are passed in the URLs,
# - DNS zones are represented in a "strongly" formatted flat format (not JSON or XML) that may
# remind a zone definition in bind9,
# - every operation requires to pass the entire updated zone through the API, so care must be
# taken to not alter unexpectedly other entries during the create/update/delete operations,
# - all headers and data are contained in the response body; then this kind of body is composed
# of several lines of type key: value containing the headers (including errors), then a blank
# line makes the separation with the data itself (see _process_response for the body parsing).
import binascii
import json
import logging
import re
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["ns.joker.com"]
API_BASE_URL = "https://dmapi.joker.com/request"
DNSZONE_ENTRY_PATTERN = re.compile(
r"^(.+?)\s+(\w+)\s+(.+?)\s+((?:\\\"(?:.+?)\\\")|(?:.+?))\s+(\d+)(?:\s+(\d+)\s+(\d+)\s+(.+?)|\s+(.+?)|)$"
)
def provider_parser(subparser):
    """Generate a subparser for Joker"""
    subparser.description = """
The Joker.com provider requires a valid token for authentication.
You can create one in the section 'Manage Joker.com API access keys' of 'My Profile' in your Joker.com account.
"""
    token_help = "specify the API Key to connect to the Joker.com API"
    subparser.add_argument("--auth-token", help=token_help)
class _Response:
def __init__(self, headers, data):
self.headers = headers
self.data = data
def __str__(self):
return json.dumps({"headers": self.headers, "data": self.data})
class Provider(BaseProvider):
    """Joker.com DMAPI provider (POST variant).

    Every operation downloads the whole zone, edits it in memory, and
    uploads the whole zone back (dns-zone-get / dns-zone-put). Payloads
    are sent in the POST body, which avoids URL length limits (HTTP 414)
    for large zones.
    """

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        # DMAPI session id, filled in by _authenticate.
        self._session_id = None

    def _authenticate(self):
        """Open a DMAPI session and verify the domain belongs to the account.

        Stores the session id from the Auth-Sid reply header, then checks
        the domain is listed with an acceptable status.
        """
        auth_token = self._get_provider_option("auth_token")
        response = requests.post(API_BASE_URL + "/login", data={"api-key": auth_token})
        result = _process_response(response)
        self._session_id = result.headers["Auth-Sid"]
        response = self._post(
            "/query-domain-list", data={"pattern": self.domain, "showstatus": 1}
        )
        if not response.data:
            raise AuthenticationError(
                f"Domain {self.domain} is not registered with this account."
            )
        data = response.data[0]
        # Line format: "<domain> <expiry> <status[,status...]>"
        items = data.split(" ")
        domain_status = items[2].split(",")
        # Any status outside this allow-list blocks the provider.
        if len(set(domain_status).difference(["production", "lock", "autorenew"])) > 0:
            raise AuthenticationError(
                f"Current status for domain {self.domain} is: {items[2]}"
            )
        self.domain_id = self.domain

    def _request(self, action="POST", url="/", data=None, query_params=None):
        """Issue a DMAPI call; `action` and `query_params` are ignored,
        everything (including auth-sid) goes in the POST body."""
        if not data:
            data = {}
        data["auth-sid"] = self._session_id
        response = requests.post(API_BASE_URL + url, data=data)
        return _process_response(response)

    def _list_records(self, rtype=None, name=None, content=None):
        """Download the zone and return parsed entries matching the filters."""
        response = self._post("/dns-zone-get", data={"domain": self.domain_id})
        zone_data = _extract_zonedata(response.data)
        # Drop "raw" pass-through lines (type is None for those).
        zone_data = [entry for entry in zone_data if entry["type"]]
        if rtype:
            zone_data = [entry for entry in zone_data if entry["type"] == rtype]
        if name:
            zone_data = [
                entry
                for entry in zone_data
                if self._full_name(entry["label"]) == self._full_name(name)
            ]
        if content:
            zone_data = [
                entry for entry in zone_data if entry["target"] == content.strip()
            ]
        records = [
            {
                "type": entry["type"],
                "name": self._full_name(entry["label"]),
                "ttl": entry["ttl"],
                "content": entry["target"],
                "id": self._identifier(entry),
            }
            for entry in zone_data
        ]
        LOGGER.debug("list_records: %s", records)
        return records

    def _create_record(self, rtype, name, content):
        """Append a new entry to the zone; duplicates are silently skipped."""
        if not rtype or not name or not content:
            raise Exception(
                "Error, rtype, name and content are mandatory to create a record."
            )
        response = self._post("/dns-zone-get", data={"domain": self.domain_id})
        zonedata = _extract_zonedata(response.data)
        new_entry = {
            "label": self._relative_name(name),
            "type": rtype,
            "pri": 0,
            "target": content,
            "ttl": self._get_lexicon_option("ttl"),
            "valid-from": None,
            "valid-to": None,
            "parameters": None,
        }
        # Identifier is a hash of type+name+content, so this is idempotent.
        if any(
            entry
            for entry in zonedata
            if self._identifier(new_entry) == self._identifier(entry)
        ):
            LOGGER.debug(
                "create_record (ignored, duplicate): %s", self._identifier(new_entry)
            )
            return True
        zonedata.append(new_entry)
        self._apply_zonedata(zonedata)
        return True

    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Modify exactly one entry in place and re-upload the zone."""
        if not identifier and not (rtype and name):
            raise Exception(
                "Error, either identifier or rtype + name are mandatory to update a record."
            )
        response = self._post("/dns-zone-get", data={"domain": self.domain_id})
        zonedata = _extract_zonedata(response.data)
        selector_info = (
            f"identifier={identifier}" if identifier else f"type={rtype},name={name}"
        )
        if identifier:
            to_update = [
                entry for entry in zonedata if self._identifier(entry) == identifier
            ]
        else:
            to_update = [
                entry
                for entry in zonedata
                if entry["type"] == rtype
                and self._full_name(entry["label"]) == self._full_name(name)
            ]
        if not to_update:
            raise Exception(f"Error, could not find a record for {selector_info}.")
        if len(to_update) > 1:
            raise Exception(
                f"Error, found more than one record for {selector_info}. "
                "Please use an identifier to select one explicitly."
            )
        # NOTE(review): zone entries carry "target", not "content" - the
        # fallback to_update[0]["content"] below likely raises KeyError
        # when `content` is falsy; confirm intended behavior.
        to_update[0].update(
            {
                "type": rtype if rtype else to_update[0]["type"],
                "label": self._relative_name(name) if name else to_update[0]["label"],
                "target": content if content else to_update[0]["content"],
                "ttl": self._get_lexicon_option("ttl"),
            }
        )
        self._apply_zonedata(zonedata)
        return True

    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Drop matching entries from the zone and re-upload it."""
        if not identifier and not rtype:
            raise Exception(
                "Error, either rtype or identifier are mandatory to delete a record."
            )
        response = self._post("/dns-zone-get", data={"domain": self.domain_id})
        zonedata = _extract_zonedata(response.data)
        if identifier:
            zonedata = [
                entry for entry in zonedata if self._identifier(entry) != identifier
            ]
        else:
            # Keep raw lines (type None) and entries whose hash differs
            # from the synthesized selector entry.
            zonedata = [
                entry
                for entry in zonedata
                if entry["type"] is None
                or self._identifier(entry)
                != self._identifier(
                    {
                        "type": rtype,
                        "label": name if name else entry["label"],
                        "target": content if content else entry["target"],
                    }
                )
            ]
        self._apply_zonedata(zonedata)
        return True

    def _apply_zonedata(self, zonedata):
        """Serialize entries back to DMAPI zone-file lines and upload them."""
        data = []
        for entry in zonedata:
            if "raw" in entry:
                data.append(entry["raw"])
            else:
                # TXT entries always require content to be quoted
                target = (
                    f'"{entry["target"]}"'
                    if entry["type"] == "TXT"
                    else entry["target"]
                )
                line = f"{entry['label']} {entry['type']} {entry['pri']} {target} {entry['ttl']}"
                if entry["valid-from"] is not None and entry["valid-to"] is not None:
                    line = f"{line} {entry['valid-from']} {entry['valid-to']}"
                if entry["parameters"] is not None:
                    line = f"{line} {entry['parameters']}"
                data.append(line)
        self._post(
            "/dns-zone-put",
            data={"domain": self.domain_id, "zone": "\n".join(data)},
        )

    def _identifier(self, record):
        """Stable 7-hex-char id: SHA256 over type, full name and target."""
        if "raw" in record:
            return None
        digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
        digest.update(("type=" + record.get("type", "") + ",").encode("utf-8"))
        digest.update(
            ("name=" + self._full_name(record.get("label", "")) + ",").encode("utf-8")
        )
        digest.update(("content=" + record.get("target", "") + ",").encode("utf-8"))
        return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7]

    def _relative_name(self, record_name):
        # The zone apex is written as "@" in DMAPI zone files.
        if record_name == self.domain_id:
            return "@"
        return super(Provider, self)._relative_name(record_name)

    def _full_name(self, record_name):
        # Expand the "@" apex marker back to the domain name.
        if record_name == "@":
            return self.domain_id
        return super(Provider, self)._full_name(record_name)
def _extract_zonedata(data):
    """Parse raw DMAPI zone lines into structured entry dicts.

    Lines matching DNSZONE_ENTRY_PATTERN become dicts with label/type/
    pri/target/ttl (plus parameters and an optional validity window);
    anything else is kept verbatim as {"raw": ..., "type": None} so the
    zone can be written back without losing unknown lines.
    """
    processed = []
    for entry in data:
        match = DNSZONE_ENTRY_PATTERN.match(entry)
        if match:
            extracted = {
                "label": match.group(1),
                "type": match.group(2),
                "pri": match.group(3),
                "target": match.group(4),
                "ttl": int(match.group(5)),
            }
            # NOTE(review): groups 8/9 exist in the pattern, so
            # match.group(9) returns None for a non-participating group
            # rather than raising IndexError - the fallback branch below
            # looks unreachable; confirm whether valid-from/valid-to are
            # ever meant to be populated.
            try:
                extracted.update(
                    {
                        "parameters": match.group(9),
                        "valid-from": None,
                        "valid-to": None,
                    }
                )
            except IndexError:
                try:
                    extracted.update(
                        {
                            "parameters": match.group(8),
                            "valid-from": int(match.group(7)),
                            "valid-to": int(match.group(6)),
                        }
                    )
                except IndexError:
                    pass
            if extracted["type"] == "TXT":
                # TXT targets are stored quoted in the zone; strip quotes.
                extracted["target"] = re.sub(
                    r'"(.*)"', r"\1", extracted["target"]
                ).strip()
            processed.append(extracted)
        else:
            # Unrecognized line (SOA, comments, ...): pass through untouched.
            processed.append(
                {
                    "raw": entry,
                    "type": None,
                }
            )
    return processed
def _process_response(response):
    """Parse a Joker DMAPI reply into a _Response, raising on API errors.

    The DMAPI body starts with "Key: value" header lines, then a blank
    line, then the data lines. A non-zero Status-Code header is turned
    into an HTTPError carrying the parsed response.

    :raises requests.exceptions.HTTPError: on HTTP errors or a non-zero
        DMAPI Status-Code.
    """
    response.raise_for_status()
    headers = {}
    body = []
    feed_headers = True
    for line in response.text.split("\n"):
        if not line:
            # First blank line separates the header block from the data.
            feed_headers = False
            continue
        if feed_headers:
            # Split on the first colon only: header values may themselves
            # contain ':' (plain split(":") would truncate them and would
            # crash on a line with no colon at all).
            key, _, value = line.partition(":")
            headers[key] = value.lstrip()
        else:
            body.append(line)
    processed_response = _Response(headers, body)
    if headers["Status-Code"] != "0":
        if body and body[0] == "API key is invalid":
            raise requests.exceptions.HTTPError(
                f"{headers['Status-Code']} Error: API key is invalid for url: {response.url}",
                response=processed_response,
            )
        raise requests.exceptions.HTTPError(
            f"{headers['Status-Code']} Error: {headers['Status-Text']} ({headers['Error']}) "
            f"for url: {response.url}",
            response=processed_response,
        )
    return processed_response
<|code_end|>
|
transip provider needs requests>=2.27.0 in dependency spec
The update of the transip provider from #1086 included in 3.9.2 uses `from requests.exceptions import JSONDecodeError` which was first included in requests 2.27.0 but the requirements spec only requires `>=2`:
https://github.com/AnalogJ/lexicon/blob/d8b26bcfffd484f34444c91ceb3ced4a2583bd18/pyproject.toml#L43
This is passed directly through to the requirement spec in the wheel:
```
Requires-Dist: requests (>=2)
```
And means that someone who upgrades dns-lexicon from a previous version using pip will get an import error.
| lexicon/providers/transip.py
<|code_start|>"""Module provider for TransIP"""
import binascii
import json
import logging
import uuid
from base64 import b64decode, b64encode
from typing import Any, Dict, List, Optional
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from requests.exceptions import JSONDecodeError
from lexicon.exceptions import LexiconError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS: List[str] = []
API_BASE_URL = "https://api.transip.nl/v6"
def provider_parser(subparser):
    """Configure provider parser for TransIP"""
    username_help = "specify username for authentication"
    api_key_help = (
        "specify the private key to use for API authentication, in PEM format: can be either "
        "the path of the key file (eg. /tmp/key.pem) or the base64 encoded content of this "
        "file prefixed by 'base64::' (eg. base64::eyJhbGciOyJ...)"
    )
    global_key_help = "set this flag is the private key used is a global key with no IP whitelist restriction"
    subparser.add_argument("--auth-username", help=username_help)
    subparser.add_argument("--auth-api-key", help=api_key_help)
    subparser.add_argument(
        "--auth-key-is-global", action="store_true", help=global_key_help
    )
class Provider(BaseProvider):
    """Provider class for TransIP (REST API v6).

    Authentication signs a one-time JSON body with the configured RSA
    private key and exchanges it for a bearer token that is sent on every
    subsequent request. Record identifiers are synthetic: a short SHA-256
    digest of type+name+content computed by :meth:`_identifier`, since the
    TransIP API does not expose native record ids.
    """
    def __init__(self, config):
        """Load configuration and the RSA private key (inline base64 or file path)."""
        super(Provider, self).__init__(config)
        self.provider_name = "transip"
        self.domain_id = None
        private_key_conf = self._get_provider_option("auth_api_key")
        if private_key_conf.startswith("base64::"):
            # Key supplied inline: base64-encoded PEM after the "base64::" prefix.
            private_key_bytes = b64decode(private_key_conf.replace("base64::", ""))
        else:
            # Otherwise the option is a filesystem path to a PEM file.
            with open(
                private_key_conf,
                "rb",
            ) as file:
                private_key_bytes = file.read()
        self.private_key = load_pem_private_key(private_key_bytes, password=None)
        # Bearer token, populated by _authenticate() before any API call.
        self.token: str
    def _authenticate(self):
        """Obtain a bearer token by signing a nonce, then resolve the domain."""
        request_body = {
            "login": self._get_provider_option("auth_username"),
            "nonce": uuid.uuid4().hex,
            "global_key": self._get_provider_option("auth_key_is_global") or False,
        }
        request_body_bytes = json.dumps(request_body).encode()
        # The exact serialized request body is what gets signed (PKCS#1 v1.5, SHA-512).
        signature = self.private_key.sign(
            request_body_bytes,
            padding.PKCS1v15(),
            hashes.SHA512(),
        )
        headers = {"Signature": b64encode(signature).decode()}
        response = requests.request(
            "POST", f"{API_BASE_URL}/auth", json=request_body, headers=headers
        )
        response.raise_for_status()
        self.token = response.json()["token"]
        # Fetching the domain both validates access and yields an identifier.
        data = self._get(f"/domains/{self.domain}")
        self.domain_id = data["domain"]["authCode"]
    def _create_record(self, rtype: str, name: str, content: str) -> bool:
        """Create a DNS entry; silently succeed if an identical entry exists."""
        if not rtype or not name or not content:
            raise Exception(
                "Error, rtype, name and content are mandatory to create a record."
            )
        identifier = Provider._identifier(
            {"type": rtype, "name": self._full_name(name), "content": content}
        )
        # Duplicate detection relies on the synthetic digest identifier.
        if any(
            record
            for record in self._list_records(rtype=rtype, name=name, content=content)
            if record["id"] == identifier
        ):
            LOGGER.debug("create_record (ignored, duplicate): %s", identifier)
            return True
        data = {
            "dnsEntry": {
                "type": rtype,
                "name": self._relative_name(name),
                "content": content,
                "expire": self._get_lexicon_option("ttl"),
            },
        }
        self._post(f"/domains/{self.domain}/dns", data=data)
        LOGGER.debug("create_record: %s", identifier)
        return True
    def _list_records(
        self,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """List DNS entries, optionally filtered by type, name and content."""
        data = self._get(f"/domains/{self.domain}/dns")
        records = []
        for entry in data["dnsEntries"]:
            record = {
                "type": entry["type"],
                "name": self._full_name(entry["name"]),
                "ttl": entry["expire"],
                "content": entry["content"],
            }
            record["id"] = Provider._identifier(record)
            records.append(record)
        # Filtering is done client-side: the API returns the full zone.
        if rtype:
            records = [record for record in records if record["type"] == rtype]
        if name:
            records = [
                record for record in records if record["name"] == self._full_name(name)
            ]
        if content:
            records = [record for record in records if record["content"] == content]
        LOGGER.debug("list_records: %s", records)
        return records
    def _update_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """Update one record, selected by identifier or by rtype+name."""
        if not identifier and (not rtype or not name):
            raise Exception("Error, identifier or rtype+name parameters are required.")
        if identifier:
            records = self._list_records()
            records_to_update = [
                record for record in records if record["id"] == identifier
            ]
        else:
            records_to_update = self._list_records(rtype=rtype, name=name)
        if not records_to_update:
            raise Exception(
                f"Error, could not find a record for given identifier: {identifier}"
            )
        if len(records_to_update) > 1:
            LOGGER.warning(
                "Warning, multiple records found for given parameters, "
                "only first one will be updated: %s",
                records_to_update,
            )
        record = records_to_update[0]
        # TransIP API is not designed to update one record out of several records
        # matching the same type+name (eg. multi-valued TXT entries).
        # To circumvent the limitation, we remove first the record to update, then
        # recreate it with the updated content.
        data = {
            "dnsEntry": {
                "type": record["type"],
                "name": self._relative_name(record["name"]),
                "content": record["content"],
                "expire": record["ttl"],
            },
        }
        self._request("DELETE", f"/domains/{self.domain}/dns", data=data)
        data["dnsEntry"]["content"] = content
        self._post(f"/domains/{self.domain}/dns", data=data)
        LOGGER.debug("update_record: %s", record["id"])
        return True
    def _delete_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """Delete all records matching the identifier or the given filters."""
        if identifier:
            records = self._list_records()
            records = [record for record in records if record["id"] == identifier]
            if not records:
                raise LexiconError(
                    f"Could not find a record matching the identifier provider: {identifier}"
                )
        else:
            records = self._list_records(rtype, name, content)
        for record in records:
            # The DELETE endpoint requires the full entry payload, not just an id.
            data = {
                "dnsEntry": {
                    "type": record["type"],
                    "name": self._relative_name(record["name"]),
                    "content": record["content"],
                    "expire": record["ttl"],
                },
            }
            self._request("DELETE", f"/domains/{self.domain}/dns", data=data)
        LOGGER.debug("delete_records: %s %s %s %s", identifier, rtype, name, content)
        return True
    def _request(
        self,
        action: str = "GET",
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Optional[Dict[str, Any]]:
        """Send an authenticated API request; treat a non-JSON body as a None payload."""
        response = requests.request(
            action,
            f"{API_BASE_URL}{url}",
            params=query_params,
            json=data,
            headers={"Authorization": f"Bearer {self.token}"},
        )
        response.raise_for_status()
        try:
            return response.json()
        except JSONDecodeError:
            # No JSON body in the response: surface it as None to callers.
            return None
    @staticmethod
    def _identifier(record):
        """Build a stable 7-hex-char digest of type+name+content used as record id."""
        digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
        digest.update(("type=" + record.get("type", "") + ",").encode("utf-8"))
        digest.update(("name=" + record.get("name", "") + ",").encode("utf-8"))
        digest.update(("content=" + record.get("content", "") + ",").encode("utf-8"))
        return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7]
<|code_end|>
| lexicon/providers/transip.py
<|code_start|>"""Module provider for TransIP"""
import binascii
import json
import logging
import uuid
from base64 import b64decode, b64encode
from typing import Any, Dict, List, Optional
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
try:
from simplejson import JSONDecodeError
except ImportError:
from json import JSONDecodeError # type: ignore[misc]
from lexicon.exceptions import LexiconError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS: List[str] = []
API_BASE_URL = "https://api.transip.nl/v6"
def provider_parser(subparser):
    """Register the TransIP-specific command-line arguments on *subparser*."""
    add = subparser.add_argument
    add("--auth-username", help="specify username for authentication")
    add(
        "--auth-api-key",
        help="specify the private key to use for API authentication, in PEM format: can be either "
        "the path of the key file (eg. /tmp/key.pem) or the base64 encoded content of this "
        "file prefixed by 'base64::' (eg. base64::eyJhbGciOyJ...)",
    )
    add(
        "--auth-key-is-global",
        action="store_true",
        help="set this flag is the private key used is a global key with no IP whitelist restriction",
    )
class Provider(BaseProvider):
    """Provider class for TransIP (REST API v6).

    Authentication signs a one-time JSON body with the configured RSA
    private key and exchanges it for a bearer token that is sent on every
    subsequent request. Record identifiers are synthetic: a short SHA-256
    digest of type+name+content computed by :meth:`_identifier`, since the
    TransIP API does not expose native record ids.
    """
    def __init__(self, config):
        """Load configuration and the RSA private key (inline base64 or file path)."""
        super(Provider, self).__init__(config)
        self.provider_name = "transip"
        self.domain_id = None
        private_key_conf = self._get_provider_option("auth_api_key")
        if private_key_conf.startswith("base64::"):
            # Key supplied inline: base64-encoded PEM after the "base64::" prefix.
            private_key_bytes = b64decode(private_key_conf.replace("base64::", ""))
        else:
            # Otherwise the option is a filesystem path to a PEM file.
            with open(
                private_key_conf,
                "rb",
            ) as file:
                private_key_bytes = file.read()
        self.private_key = load_pem_private_key(private_key_bytes, password=None)
        # Bearer token, populated by _authenticate() before any API call.
        self.token: str
    def _authenticate(self):
        """Obtain a bearer token by signing a nonce, then resolve the domain."""
        request_body = {
            "login": self._get_provider_option("auth_username"),
            "nonce": uuid.uuid4().hex,
            "global_key": self._get_provider_option("auth_key_is_global") or False,
        }
        request_body_bytes = json.dumps(request_body).encode()
        # The exact serialized request body is what gets signed (PKCS#1 v1.5, SHA-512).
        signature = self.private_key.sign(
            request_body_bytes,
            padding.PKCS1v15(),
            hashes.SHA512(),
        )
        headers = {"Signature": b64encode(signature).decode()}
        response = requests.request(
            "POST", f"{API_BASE_URL}/auth", json=request_body, headers=headers
        )
        response.raise_for_status()
        self.token = response.json()["token"]
        # Fetching the domain both validates access and yields an identifier.
        data = self._get(f"/domains/{self.domain}")
        self.domain_id = data["domain"]["authCode"]
    def _create_record(self, rtype: str, name: str, content: str) -> bool:
        """Create a DNS entry; silently succeed if an identical entry exists."""
        if not rtype or not name or not content:
            raise Exception(
                "Error, rtype, name and content are mandatory to create a record."
            )
        identifier = Provider._identifier(
            {"type": rtype, "name": self._full_name(name), "content": content}
        )
        # Duplicate detection relies on the synthetic digest identifier.
        if any(
            record
            for record in self._list_records(rtype=rtype, name=name, content=content)
            if record["id"] == identifier
        ):
            LOGGER.debug("create_record (ignored, duplicate): %s", identifier)
            return True
        data = {
            "dnsEntry": {
                "type": rtype,
                "name": self._relative_name(name),
                "content": content,
                "expire": self._get_lexicon_option("ttl"),
            },
        }
        self._post(f"/domains/{self.domain}/dns", data=data)
        LOGGER.debug("create_record: %s", identifier)
        return True
    def _list_records(
        self,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """List DNS entries, optionally filtered by type, name and content."""
        data = self._get(f"/domains/{self.domain}/dns")
        records = []
        for entry in data["dnsEntries"]:
            record = {
                "type": entry["type"],
                "name": self._full_name(entry["name"]),
                "ttl": entry["expire"],
                "content": entry["content"],
            }
            record["id"] = Provider._identifier(record)
            records.append(record)
        # Filtering is done client-side: the API returns the full zone.
        if rtype:
            records = [record for record in records if record["type"] == rtype]
        if name:
            records = [
                record for record in records if record["name"] == self._full_name(name)
            ]
        if content:
            records = [record for record in records if record["content"] == content]
        LOGGER.debug("list_records: %s", records)
        return records
    def _update_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """Update one record, selected by identifier or by rtype+name."""
        if not identifier and (not rtype or not name):
            raise Exception("Error, identifier or rtype+name parameters are required.")
        if identifier:
            records = self._list_records()
            records_to_update = [
                record for record in records if record["id"] == identifier
            ]
        else:
            records_to_update = self._list_records(rtype=rtype, name=name)
        if not records_to_update:
            raise Exception(
                f"Error, could not find a record for given identifier: {identifier}"
            )
        if len(records_to_update) > 1:
            LOGGER.warning(
                "Warning, multiple records found for given parameters, "
                "only first one will be updated: %s",
                records_to_update,
            )
        record = records_to_update[0]
        # TransIP API is not designed to update one record out of several records
        # matching the same type+name (eg. multi-valued TXT entries).
        # To circumvent the limitation, we remove first the record to update, then
        # recreate it with the updated content.
        data = {
            "dnsEntry": {
                "type": record["type"],
                "name": self._relative_name(record["name"]),
                "content": record["content"],
                "expire": record["ttl"],
            },
        }
        self._request("DELETE", f"/domains/{self.domain}/dns", data=data)
        data["dnsEntry"]["content"] = content
        self._post(f"/domains/{self.domain}/dns", data=data)
        LOGGER.debug("update_record: %s", record["id"])
        return True
    def _delete_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """Delete all records matching the identifier or the given filters."""
        if identifier:
            records = self._list_records()
            records = [record for record in records if record["id"] == identifier]
            if not records:
                raise LexiconError(
                    f"Could not find a record matching the identifier provider: {identifier}"
                )
        else:
            records = self._list_records(rtype, name, content)
        for record in records:
            # The DELETE endpoint requires the full entry payload, not just an id.
            data = {
                "dnsEntry": {
                    "type": record["type"],
                    "name": self._relative_name(record["name"]),
                    "content": record["content"],
                    "expire": record["ttl"],
                },
            }
            self._request("DELETE", f"/domains/{self.domain}/dns", data=data)
        LOGGER.debug("delete_records: %s %s %s %s", identifier, rtype, name, content)
        return True
    def _request(
        self,
        action: str = "GET",
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Optional[Dict[str, Any]]:
        """Send an authenticated API request; treat a non-JSON body as a None payload."""
        response = requests.request(
            action,
            f"{API_BASE_URL}{url}",
            params=query_params,
            json=data,
            headers={"Authorization": f"Bearer {self.token}"},
        )
        response.raise_for_status()
        try:
            return response.json()
        except JSONDecodeError:
            # No JSON body in the response: surface it as None to callers.
            return None
    @staticmethod
    def _identifier(record):
        """Build a stable 7-hex-char digest of type+name+content used as record id."""
        digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
        digest.update(("type=" + record.get("type", "") + ",").encode("utf-8"))
        digest.update(("name=" + record.get("name", "") + ",").encode("utf-8"))
        digest.update(("content=" + record.get("content", "") + ",").encode("utf-8"))
        return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7]
<|code_end|>
|
RFE: Replace use of `pkg_resources` with `importlib.metadata`
See discussions:
[astropy/astropy#11091](https://github.com/astropy/astropy/pull/11091)
[pypa/pip#7413](https://github.com/pypa/pip/issues/7413)
```console
[tkloczko@devel-g2v lexicon-3.11.0]$ grep -r pkg_resources
lexicon/discovery.py:import pkg_resources
lexicon/discovery.py: distribution = pkg_resources.get_distribution("dns-lexicon")
lexicon/discovery.py: except pkg_resources.DistributionNotFound:
lexicon/discovery.py: return pkg_resources.get_distribution("dns-lexicon").version
lexicon/discovery.py: except pkg_resources.DistributionNotFound:
lexicon/discovery.py: provider: str, distribution: pkg_resources.Distribution
lexicon/discovery.py: requirements: List[pkg_resources.Requirement] = distribution.requires(
lexicon/discovery.py: except pkg_resources.UnknownExtra:
lexicon/discovery.py: pkg_resources.get_distribution(requirement.name) # type: ignore
lexicon/discovery.py: pkg_resources.get_distribution(requirement)
lexicon/discovery.py: except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict):
| * 90642ca - (origin/fix-provider-disco-legacy, jamin/fix-provider-disco-legacy, giuse/fix-provider-disco-legacy) Support old versions of pkg_resources (2 years, 10 months ago) <Adrien Ferrand>
```
| lexicon/client.py
<|code_start|>"""Main module of Lexicon. Defines the Client class, that holds all Lexicon logic."""
import importlib
import logging
import os
from typing import Dict, List, Optional, Type, Union, cast
import tldextract # type: ignore
from lexicon import config as helper_config
from lexicon import discovery
from lexicon.exceptions import ProviderNotAvailableError
from lexicon.providers.base import Provider
class Client(object):
    """This is the Lexicon client, that will execute all the logic."""
    def __init__(
        self, config: Optional[Union[helper_config.ConfigResolver, Dict]] = None
    ):
        """Resolve configuration, normalize the domain, and instantiate the provider."""
        if not config:
            # If there is not config specified, we load a non-interactive configuration.
            self.config = helper_config.non_interactive_config_resolver()
        elif not isinstance(config, helper_config.ConfigResolver):
            # If config is not a ConfigResolver, we are in a legacy situation.
            # We protect this part of the Client API.
            self.config = helper_config.legacy_config_resolver(config)
        else:
            self.config = config
        # Validate configuration
        self._validate_config()
        runtime_config = {}
        # Process domain, strip subdomain
        # NOTE(review): the TypeError fallback presumably targets tldextract
        # versions whose constructor takes cache_file instead of cache_dir —
        # confirm against the supported tldextract versions.
        try:
            domain_extractor = tldextract.TLDExtract(
                cache_dir=_get_tldextract_cache_path(), include_psl_private_domains=True
            )
        except TypeError:
            domain_extractor = tldextract.TLDExtract(
                cache_file=_get_tldextract_cache_path(), include_psl_private_domains=True  # type: ignore
            )
        domain_parts = domain_extractor(
            cast(str, self.config.resolve("lexicon:domain"))
        )
        runtime_config["domain"] = f"{domain_parts.domain}.{domain_parts.suffix}"
        delegated = self.config.resolve("lexicon:delegated")
        if delegated:
            # handle delegated domain
            delegated = str(delegated).rstrip(".")
            initial_domain = str(runtime_config.get("domain"))
            if delegated != initial_domain:
                # convert to relative name
                if delegated.endswith(initial_domain):
                    delegated = delegated[: -len(initial_domain)]
                delegated = delegated.rstrip(".")
                # update domain
                runtime_config["domain"] = f"{delegated}.{initial_domain}"
        self.action = self.config.resolve("lexicon:action")
        self.provider_name = self.config.resolve(
            "lexicon:provider_name"
        ) or self.config.resolve("lexicon:provider")
        if not self.provider_name:
            raise ValueError("Could not resolve provider name.")
        # Runtime values (normalized domain) are inserted at index 0 so they
        # take precedence over user-supplied configuration sources.
        self.config.add_config_source(helper_config.DictConfigSource(runtime_config), 0)
        provider_module = importlib.import_module(
            "lexicon.providers." + self.provider_name
        )
        provider_class: Type[Provider] = getattr(provider_module, "Provider")
        self.provider = provider_class(self.config)
    def execute(self) -> Union[bool, List[Dict]]:
        """Execute provided configuration in class constructor to the DNS records"""
        self.provider.authenticate()
        identifier = self.config.resolve("lexicon:identifier")
        record_type = self.config.resolve("lexicon:type")
        name = self.config.resolve("lexicon:name")
        content = self.config.resolve("lexicon:content")
        if self.action == "create":
            if not record_type or not name or not content:
                raise ValueError("Missing record_type, name or content parameters.")
            return self.provider.create_record(record_type, name, content)
        if self.action == "list":
            return self.provider.list_records(record_type, name, content)
        if self.action == "update":
            return self.provider.update_record(identifier, record_type, name, content)
        if self.action == "delete":
            return self.provider.delete_record(identifier, record_type, name, content)
        raise ValueError(f"Invalid action statement: {self.action}")
    def _validate_config(self) -> None:
        """Check that provider_name, action, domain and type are all resolvable."""
        provider_name = self.config.resolve("lexicon:provider_name")
        if not provider_name:
            raise AttributeError("provider_name")
        try:
            available = discovery.find_providers()[provider_name]
        except KeyError:
            raise ProviderNotAvailableError(
                f"This provider ({provider_name}) is not supported by Lexicon."
            )
        else:
            if not available:
                raise ProviderNotAvailableError(
                    f"This provider ({provider_name}) has required dependencies that are missing. "
                    f"Please install lexicon[{provider_name}] first."
                )
        if not self.config.resolve("lexicon:action"):
            raise AttributeError("action")
        if not self.config.resolve("lexicon:domain"):
            raise AttributeError("domain")
        if not self.config.resolve("lexicon:type"):
            raise AttributeError("type")
def _get_tldextract_cache_path() -> str:
if os.environ.get("TLDEXTRACT_CACHE_FILE"):
logging.warning(
"TLD_EXTRACT_CACHE_FILE environment variable is deprecated, please use TLDEXTRACT_CACHE_PATH instead."
)
os.environ["TLDEXTRACT_CACHE_PATH"] = os.environ["TLDEXTRACT_CACHE_FILE"]
return os.path.expanduser(
os.environ.get("TLDEXTRACT_CACHE_PATH", os.path.join("~", ".lexicon_tld_set"))
)
<|code_end|>
lexicon/discovery.py
<|code_start|>"""
This module takes care of finding information about the runtime of Lexicon:
* what are the providers installed, and available
* what is the version of Lexicon
"""
import pkgutil
from typing import Dict, List
import pkg_resources
from lexicon import providers
def find_providers() -> Dict[str, bool]:
    """Map every provider module shipped with Lexicon to its availability."""
    module_names = {
        name
        for (_, name, _) in pkgutil.iter_modules(providers.__path__)  # type: ignore
        if name != "base"
    }
    providers_list = sorted(module_names)
    try:
        distribution = pkg_resources.get_distribution("dns-lexicon")
    except pkg_resources.DistributionNotFound:
        # Not installed as a distribution: assume every provider is usable.
        return dict.fromkeys(providers_list, True)
    return {
        provider: _resolve_requirements(provider, distribution)
        for provider in providers_list
    }
def lexicon_version() -> str:
    """Return the installed Lexicon version, or "unknown" when not installed."""
    try:
        distribution = pkg_resources.get_distribution("dns-lexicon")
    except pkg_resources.DistributionNotFound:
        return "unknown"
    return distribution.version
def _resolve_requirements(
    provider: str, distribution: pkg_resources.Distribution
) -> bool:
    """Return True if every extra dependency declared for *provider* is installed."""
    try:
        requirements: List[pkg_resources.Requirement] = distribution.requires(
            extras=(provider,)
        )
    except pkg_resources.UnknownExtra:
        # No extra for this provider
        return True
    else:
        # Extra is defined
        try:
            for requirement in requirements:
                # NOTE(review): the hasattr fallback presumably supports old
                # pkg_resources Requirement objects without a .name attribute —
                # confirm against the supported setuptools versions.
                if hasattr(requirement, "name"):
                    pkg_resources.get_distribution(requirement.name)  # type: ignore
                else:
                    pkg_resources.get_distribution(requirement)
        except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict):
            # At least one extra requirement is not fulfilled
            return False
    return True
<|code_end|>
lexicon/parser.py
<|code_start|>"""Parsers definition for the Lexicon command-line interface"""
import argparse
import importlib
import os
from lexicon import discovery
def generate_base_provider_parser() -> argparse.ArgumentParser:
    """Build the argument parser shared by every DNS provider sub-command."""
    parser = argparse.ArgumentParser(add_help=False)
    add = parser.add_argument
    add(
        "action",
        help="specify the action to take",
        default="list",
        choices=["create", "list", "update", "delete"],
    )
    add("domain", help="specify the domain, supports subdomains as well")
    add(
        "type",
        help="specify the entry type",
        default="TXT",
        choices=["A", "AAAA", "CNAME", "MX", "NS", "SOA", "TXT", "SRV", "LOC"],
    )
    add("--name", help="specify the record name")
    add("--content", help="specify the record content")
    add("--ttl", type=int, help="specify the record time-to-live")
    add("--priority", help="specify the record priority")
    add("--identifier", help="specify the record for update or delete actions")
    add(
        "--log_level",
        help="specify the log level",
        default="ERROR",
        choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"],
    )
    add(
        "--output",
        help=(
            "specify the type of output: by default a formatted table (TABLE), "
            "a formatted table without header (TABLE-NO-HEADER), "
            "a JSON string (JSON) or no output (QUIET)"
        ),
        default="TABLE",
        choices=["TABLE", "TABLE-NO-HEADER", "JSON", "QUIET"],
    )
    return parser
def generate_cli_main_parser() -> argparse.ArgumentParser:
    """Using all providers available, generate a parser that will be used by Lexicon CLI"""
    parser = argparse.ArgumentParser(
        description="Create, Update, Delete, List DNS entries"
    )
    parser.add_argument(
        "--version",
        help="show the current version of lexicon",
        action="version",
        version=f"%(prog)s {discovery.lexicon_version()}",
    )
    parser.add_argument("--delegated", help="specify the delegated domain")
    parser.add_argument(
        "--config-dir",
        default=os.getcwd(),
        help="specify the directory where to search lexicon.yml and "
        "lexicon_[provider].yml configuration files "
        "(default: current directory).",
    )
    # One sub-command per discovered provider; selecting one is mandatory.
    subparsers = parser.add_subparsers(
        dest="provider_name", help="specify the DNS provider to use"
    )
    subparsers.required = True
    for provider, available in discovery.find_providers().items():
        # Each provider module contributes its own flags on top of the base parser.
        provider_module = importlib.import_module("lexicon.providers." + provider)
        provider_parser = getattr(provider_module, "provider_parser")
        subparser = subparsers.add_parser(
            provider,
            help=f"{provider} provider",
            parents=[generate_base_provider_parser()],
        )
        provider_parser(subparser)
        if not available:
            # Provider is registered but its extra dependencies are missing.
            subparser.epilog = (
                "WARNING: some required dependencies for this provider are not "
                f"installed. Please install lexicon[{provider}] first before using it."
            )
    return parser
<|code_end|>
| lexicon/client.py
<|code_start|>"""Main module of Lexicon. Defines the Client class, that holds all Lexicon logic."""
import importlib
import logging
import os
from typing import Dict, List, Optional, Type, Union, cast
import tldextract # type: ignore
from lexicon import config as helper_config
from lexicon import discovery
from lexicon.exceptions import ProviderNotAvailableError
from lexicon.providers.base import Provider
class Client(object):
    """This is the Lexicon client, that will execute all the logic."""
    def __init__(
        self, config: Optional[Union[helper_config.ConfigResolver, Dict]] = None
    ):
        """Resolve configuration, normalize the domain, and instantiate the provider."""
        if not config:
            # If there is not config specified, we load a non-interactive configuration.
            self.config = helper_config.non_interactive_config_resolver()
        elif not isinstance(config, helper_config.ConfigResolver):
            # If config is not a ConfigResolver, we are in a legacy situation.
            # We protect this part of the Client API.
            self.config = helper_config.legacy_config_resolver(config)
        else:
            self.config = config
        # Validate configuration
        self._validate_config()
        runtime_config = {}
        # Process domain, strip subdomain
        # NOTE(review): the TypeError fallback presumably targets tldextract
        # versions whose constructor takes cache_file instead of cache_dir —
        # confirm against the supported tldextract versions.
        try:
            domain_extractor = tldextract.TLDExtract(
                cache_dir=_get_tldextract_cache_path(), include_psl_private_domains=True
            )
        except TypeError:
            domain_extractor = tldextract.TLDExtract(
                cache_file=_get_tldextract_cache_path(), include_psl_private_domains=True  # type: ignore
            )
        domain_parts = domain_extractor(
            cast(str, self.config.resolve("lexicon:domain"))
        )
        runtime_config["domain"] = f"{domain_parts.domain}.{domain_parts.suffix}"
        delegated = self.config.resolve("lexicon:delegated")
        if delegated:
            # handle delegated domain
            delegated = str(delegated).rstrip(".")
            initial_domain = str(runtime_config.get("domain"))
            if delegated != initial_domain:
                # convert to relative name
                if delegated.endswith(initial_domain):
                    delegated = delegated[: -len(initial_domain)]
                delegated = delegated.rstrip(".")
                # update domain
                runtime_config["domain"] = f"{delegated}.{initial_domain}"
        self.action = self.config.resolve("lexicon:action")
        self.provider_name = self.config.resolve(
            "lexicon:provider_name"
        ) or self.config.resolve("lexicon:provider")
        if not self.provider_name:
            raise ValueError("Could not resolve provider name.")
        # Runtime values (normalized domain) are inserted at index 0 so they
        # take precedence over user-supplied configuration sources.
        self.config.add_config_source(helper_config.DictConfigSource(runtime_config), 0)
        provider_module = importlib.import_module(
            "lexicon.providers." + self.provider_name
        )
        provider_class: Type[Provider] = getattr(provider_module, "Provider")
        self.provider = provider_class(self.config)
    def execute(self) -> Union[bool, List[Dict]]:
        """Execute provided configuration in class constructor to the DNS records"""
        self.provider.authenticate()
        identifier = self.config.resolve("lexicon:identifier")
        record_type = self.config.resolve("lexicon:type")
        name = self.config.resolve("lexicon:name")
        content = self.config.resolve("lexicon:content")
        if self.action == "create":
            if not record_type or not name or not content:
                raise ValueError("Missing record_type, name or content parameters.")
            return self.provider.create_record(record_type, name, content)
        if self.action == "list":
            return self.provider.list_records(record_type, name, content)
        if self.action == "update":
            return self.provider.update_record(identifier, record_type, name, content)
        if self.action == "delete":
            return self.provider.delete_record(identifier, record_type, name, content)
        raise ValueError(f"Invalid action statement: {self.action}")
    def _validate_config(self) -> None:
        """Check that provider_name, action, domain and type are all resolvable."""
        provider_name = self.config.resolve("lexicon:provider_name")
        if not provider_name:
            raise AttributeError("provider_name")
        try:
            available = discovery.find_providers()[provider_name]
        except KeyError:
            raise ProviderNotAvailableError(
                f"This provider ({provider_name}) is not supported by Lexicon."
            )
        else:
            if not available:
                raise ProviderNotAvailableError(
                    f"This provider ({provider_name}) has required extra dependencies that are missing. "
                    f"Please run `pip install lexicon[{provider_name}]` first before using it."
                )
        if not self.config.resolve("lexicon:action"):
            raise AttributeError("action")
        if not self.config.resolve("lexicon:domain"):
            raise AttributeError("domain")
        if not self.config.resolve("lexicon:type"):
            raise AttributeError("type")
def _get_tldextract_cache_path() -> str:
if os.environ.get("TLDEXTRACT_CACHE_FILE"):
logging.warning(
"TLD_EXTRACT_CACHE_FILE environment variable is deprecated, please use TLDEXTRACT_CACHE_PATH instead."
)
os.environ["TLDEXTRACT_CACHE_PATH"] = os.environ["TLDEXTRACT_CACHE_FILE"]
return os.path.expanduser(
os.environ.get("TLDEXTRACT_CACHE_PATH", os.path.join("~", ".lexicon_tld_set"))
)
<|code_end|>
lexicon/discovery.py
<|code_start|>"""
This module takes care of finding information about the runtime of Lexicon:
* what are the providers installed, and available
* what is the version of Lexicon
"""
import pkgutil
import re
from typing import Dict
try:
from importlib.metadata import Distribution, PackageNotFoundError
except ModuleNotFoundError:
from importlib_metadata import Distribution, PackageNotFoundError # type: ignore[misc]
from lexicon import providers
def find_providers() -> Dict[str, bool]:
    """Map every provider module shipped with Lexicon to its availability."""
    module_names = {
        name
        for (_, name, _) in pkgutil.iter_modules(providers.__path__)  # type: ignore
        if name != "base"
    }
    providers_list = sorted(module_names)
    try:
        distribution = Distribution.from_name("dns-lexicon")
    except PackageNotFoundError:
        # Not installed as a distribution: assume every provider is usable.
        return dict.fromkeys(providers_list, True)
    return {
        provider: _resolve_requirements(provider, distribution)
        for provider in providers_list
    }
def lexicon_version() -> str:
    """Return the installed Lexicon version, or "unknown" when not installed."""
    try:
        distribution = Distribution.from_name("dns-lexicon")
    except PackageNotFoundError:
        return "unknown"
    return distribution.version
def _resolve_requirements(provider: str, distribution: Distribution) -> bool:
    """Return True if every extra dependency declared for *provider* is installed."""
    requires = distribution.requires
    if requires is None:
        raise ValueError("Error while trying finding requirements.")
    # Keep only requirements gated behind this provider's extra, stripping the
    # "(specifier)" and trailing environment marker to recover the bare name.
    # NOTE(review): the regex assumes the "name (specifier); marker" metadata
    # form — metadata without parenthesized specifiers would pass through
    # unchanged; confirm against the metadata formats in the wild.
    requirements = [
        re.sub(r"^(.*)\s\(.*\)(?:;.*|)$", r"\1", requirement)
        for requirement in requires
        if f'extra == "{provider}"' in requirement
    ]
    if not requirements:
        # No extra for this provider
        return True
    for requirement in requirements:
        try:
            Distribution.from_name(requirement)
        except PackageNotFoundError:
            # At least one extra requirement is not fulfilled
            return False
    # All extra requirements are fulfilled
    return True
<|code_end|>
lexicon/parser.py
<|code_start|>"""Parsers definition for the Lexicon command-line interface"""
import argparse
import importlib
import os
from lexicon import discovery
def generate_base_provider_parser() -> argparse.ArgumentParser:
    """Build the argument parser shared by every DNS provider sub-command."""
    parser = argparse.ArgumentParser(add_help=False)
    add = parser.add_argument
    add(
        "action",
        help="specify the action to take",
        default="list",
        choices=["create", "list", "update", "delete"],
    )
    add("domain", help="specify the domain, supports subdomains as well")
    add(
        "type",
        help="specify the entry type",
        default="TXT",
        choices=["A", "AAAA", "CNAME", "MX", "NS", "SOA", "TXT", "SRV", "LOC"],
    )
    add("--name", help="specify the record name")
    add("--content", help="specify the record content")
    add("--ttl", type=int, help="specify the record time-to-live")
    add("--priority", help="specify the record priority")
    add("--identifier", help="specify the record for update or delete actions")
    add(
        "--log_level",
        help="specify the log level",
        default="ERROR",
        choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"],
    )
    add(
        "--output",
        help=(
            "specify the type of output: by default a formatted table (TABLE), "
            "a formatted table without header (TABLE-NO-HEADER), "
            "a JSON string (JSON) or no output (QUIET)"
        ),
        default="TABLE",
        choices=["TABLE", "TABLE-NO-HEADER", "JSON", "QUIET"],
    )
    return parser
def generate_cli_main_parser() -> argparse.ArgumentParser:
    """Using all providers available, generate a parser that will be used by Lexicon CLI"""
    parser = argparse.ArgumentParser(
        description="Create, Update, Delete, List DNS entries"
    )
    parser.add_argument(
        "--version",
        help="show the current version of lexicon",
        action="version",
        version=f"%(prog)s {discovery.lexicon_version()}",
    )
    parser.add_argument("--delegated", help="specify the delegated domain")
    parser.add_argument(
        "--config-dir",
        default=os.getcwd(),
        help="specify the directory where to search lexicon.yml and "
        "lexicon_[provider].yml configuration files "
        "(default: current directory).",
    )
    # One sub-command per discovered provider; selecting one is mandatory.
    subparsers = parser.add_subparsers(
        dest="provider_name", help="specify the DNS provider to use"
    )
    subparsers.required = True
    for provider, available in discovery.find_providers().items():
        # Each provider module contributes its own flags on top of the base parser.
        provider_module = importlib.import_module("lexicon.providers." + provider)
        provider_parser = getattr(provider_module, "provider_parser")
        subparser = subparsers.add_parser(
            provider,
            help=f"{provider} provider",
            parents=[generate_base_provider_parser()],
        )
        provider_parser(subparser)
        if not available:
            # Provider is registered but its extra dependencies are missing.
            subparser.epilog = (
                "WARNING: some required dependencies for this provider are not "
                f"installed. Please run `pip install lexicon[{provider}]` first before using it."
            )
    return parser
<|code_end|>
|
Add support for porkbun
https://porkbun.com/
Add support for porkbun
https://porkbun.com/
| lexicon/providers/porkbun.py
<|code_start|><|code_end|>
| lexicon/providers/porkbun.py
<|code_start|>import logging
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
import json
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["porkbun.com"]
def provider_parser(subparser):
    """Attach the Porkbun authentication options to *subparser*."""
    subparser.description = """
    To authenticate with Porkbun, you need both an API key and a
    secret API key. These can be created at porkbun.com/account/api .
    """
    subparser.add_argument(
        "--auth-key", help="specify API key for authentication"
    )
    subparser.add_argument("--auth-secret", help="specify secret API key for authentication")
class Provider(BaseProvider):
    """Provider class for Porkbun.

    Talks to the Porkbun JSON API (v3); every request is a POST whose body
    carries the API key pair alongside the payload (see _request).
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.api_endpoint = "https://porkbun.com/api/json/v3"
        self._api_key = self._get_provider_option("auth_key")
        self._secret_api_key = self._get_provider_option("auth_secret")
        # Auth payload merged into every request body by _request.
        self._auth_data = {
            "apikey": self._api_key,
            "secretapikey": self._secret_api_key,
        }
        self.domain = self._get_lexicon_option("domain")
    def _authenticate(self):
        """Verify the API keys via /ping, then prime the record cache."""
        # more of a test that the authentication works
        response = self._post("/ping")
        if response["status"] != "SUCCESS":
            raise AuthenticationError("Incorrect API keys")
        # Porkbun exposes no numeric domain id, so the name stands in for it.
        self.domain_id = self.domain
        self._list_records()
    def _create_record(self, rtype, name, content):
        """Create a record; returns True without calling the API if it already exists."""
        active_records = self._list_records(rtype, name, content)
        # if the record already exists: early exit, return success
        if active_records:
            LOGGER.debug("create_record: record already exists")
            return True
        data = {
            "type": rtype,
            "content": content,
            "name": self._relative_name(name),
        }
        if self._get_lexicon_option("priority"):
            data["prio"] = self._get_lexicon_option("priority")
        if self._get_lexicon_option("ttl"):
            data["ttl"] = self._get_lexicon_option("ttl")
        response = self._post(f"/dns/create/{self.domain}", data)
        LOGGER.debug(f"create_record: {response}")
        return response["status"] == "SUCCESS"
    def _list_records(self, rtype=None, name=None, content=None):
        """Fetch all records for the domain and filter locally by rtype/name/content."""
        # porkbun has some weird behavior on the retrieveByNameType endpoint
        # related to how it handles subdomains.
        # so we ignore it and filter locally instead
        records = self._post(f"/dns/retrieve/{self.domain}")
        if records["status"] != "SUCCESS":
            raise requests.exceptions.HTTPError(records)
        records = records["records"]
        records = self._format_records(records)
        # filter for content if it was provided
        if content is not None:
            records = [x for x in records if x["content"] == content]
        # filter for name if it was provided
        if name is not None:
            records = [x for x in records if x["name"] == self._full_name(name)]
        # filter for rtype if it was provided
        if rtype is not None:
            records = [x for x in records if x["type"] == rtype]
        LOGGER.debug(f"list_records: {records}")
        LOGGER.debug(f"Number of records retrieved: {len(records)}")
        return records
    def _update_record(self, identifier=None, rtype=None, name=None, content=None):
        """Update one record; without an identifier, type+name must match exactly one record."""
        if identifier is None:
            records = self._list_records(rtype, name)
            if len(records) == 1:
                identifier = records[0]["id"]
            elif len(records) == 0:
                raise Exception(
                    "No records found matching type and name - won't update"
                )
            else:
                raise Exception(
                    "Multiple records found matching type and name - won't update"
                )
        endpoint = f"/dns/edit/{self.domain}/{identifier}"
        data = {"name": self._relative_name(name), "type": rtype, "content": content}
        # if set to 0, then this will automatically get set to 300
        if self._get_lexicon_option("ttl"):
            data["ttl"] = self._get_lexicon_option("ttl")
        if self._get_lexicon_option("priority"):
            data["prio"] = self._get_lexicon_option("priority")
        result = self._post(endpoint, data)
        LOGGER.debug(f"update_record: {result}")
        return result["status"] == "SUCCESS"
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete matching records (by identifier, or by rtype/name/content lookup).

        Always returns True; per-record delete responses are not checked.
        """
        if identifier is None:
            records = self._list_records(rtype, name, content)
            delete_record_ids = [record["id"] for record in records]
        else:
            delete_record_ids = [identifier]
        LOGGER.debug(f"deleting records: {delete_record_ids}")
        for record_id in delete_record_ids:
            self._post(f"/dns/delete/{self.domain}/{record_id}")
        LOGGER.debug("delete_record: success")
        return True
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Send one API request; the auth key pair is merged into the JSON body."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        headers = {"Content-Type": "application/json"}
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps({**data, **self._auth_data}),
            headers=headers,
        )
        response.raise_for_status()
        return response.json()
    def _format_records(self, records):
        """Normalize raw API records in place: qualify names, map prio -> options.mx.priority."""
        for record in records:
            record["name"] = self._full_name(record["name"])
            if "prio" in record:
                record["options"] = {"mx": {"priority": record["prio"]}}
                del record["prio"]
        return records
<|code_end|>
|
Bug in create action for glesys provider
When creating an A record with the glesys provider, the full name is added instead of the host name.
```
lexicon_config = {
"provider_name" : "glesys",
"action": "create",
"domain": "somedomain.com",
"type": "A",
"name": "lexicon",
"content": "1.2.3.4",
"glesys": {
}
}
```
Results in the A-record:
`{'id': 2723410, 'type': 'A', 'name': 'lexicon.somedomain.com', 'ttl': 3600, 'content': '1.2.3.4'}`
While the expected result is:
`{'id': 2723410, 'type': 'A', 'name': 'lexicon', 'ttl': 3600, 'content': '1.2.3.4'}`
The request data sent to `domain/addrecord` :
`{'domainname': 'somedomain.com', 'host': 'lexicon.somedomain.com', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`
Expected request data to `domain/addrecord`:
`{'domainname': 'somedomain.com', 'host': 'lexicon', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`
Glesys API documentation:
```
domain/addrecord
Url: https://api.glesys.com/domain/addrecord
Method: Only Https POST
Required arguments: domainname , host , type , data
Optional arguments: ttl
Description: Adds a dns record to a domain
```
| lexicon/providers/glesys.py
<|code_start|>"""Module provider for Glesys"""
import json
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
NAMESERVER_DOMAINS = ["glesys.com"]
def provider_parser(subparser):
    """Register the Glesys authentication options on *subparser*."""
    subparser.add_argument(
        "--auth-username", help="specify username (CL12345)"
    )
    subparser.add_argument(
        "--auth-token", help="specify API key"
    )
class Provider(BaseProvider):
    """Provider class for Glesys.

    Uses the Glesys JSON API (https://api.glesys.com) with HTTP basic auth
    (username + API key).
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = "https://api.glesys.com"
    def _authenticate(self):
        """Check that the configured domain exists on this account."""
        payload = self._get("/domain/list")
        domains = payload["response"]["domains"]
        for record in domains:
            if record["domainname"] == self.domain:
                # Domain records do not have any id.
                # Since domain_id cannot be None, use domain name as id instead.
                self.domain_id = record["domainname"]
                break
        else:
            raise AuthenticationError("No domain found")
    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        """Create a record; no-op (returns True) if an identical record exists."""
        existing = self.list_records(rtype, name, content)
        if existing:
            # Already exists, do nothing.
            return True
        request_data = {
            "domainname": self.domain,
            # BUGFIX: domain/addrecord expects the host part relative to the
            # domain (e.g. "lexicon"), not the FQDN. Passing the full name
            # created records like "lexicon.somedomain.com" instead of
            # "lexicon". This also matches what _update_record sends.
            "host": name,
            "type": rtype,
            "data": content,
        }
        self._addttl(request_data)
        self._post("/domain/addrecord", data=request_data)
        return True
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List records for the domain, filtered locally by rtype/name/content."""
        request_data = {"domainname": self.domain}
        payload = self._post("/domain/listrecords", data=request_data)
        # Convert from Glesys record structure to Lexicon structure.
        processed_records = [
            self._glesysrecord2lexiconrecord(r) for r in payload["response"]["records"]
        ]
        if rtype:
            processed_records = [
                record for record in processed_records if record["type"] == rtype
            ]
        if name:
            processed_records = [
                record
                for record in processed_records
                if record["name"] == self._full_name(name)
            ]
        if content:
            # Content comparison is case-insensitive.
            processed_records = [
                record
                for record in processed_records
                if record["content"].lower() == content.lower()
            ]
        return processed_records
    # Update a record. Identifier must be specified.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update the record with the given identifier; only provided fields change."""
        request_data = {"recordid": identifier}
        if name:
            request_data["host"] = name
        if rtype:
            request_data["type"] = rtype
        if content:
            request_data["data"] = content
        self._addttl(request_data)
        self._post("/domain/updaterecord", data=request_data)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    # If an identifier is specified, use it, otherwise do a lookup using type, name and content.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete records by identifier or by rtype/name/content lookup; always True."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)
        for record_id in delete_record_id:
            request_data = {"recordid": record_id}
            self._post("/domain/deleterecord", data=request_data)
        return True
    # Helpers.
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Send one JSON request with basic auth; raises on HTTP errors."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        query_params["format"] = "json"
        default_headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        credentials = (
            self._get_provider_option("auth_username"),
            self._get_provider_option("auth_token"),
        )
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps(data),
            headers=default_headers,
            auth=credentials,
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()
    # Adds TTL parameter if passed as argument to lexicon.
    def _addttl(self, request_data):
        if self._get_lexicon_option("ttl"):
            request_data["ttl"] = self._get_lexicon_option("ttl")
    # From Glesys record structure: [u'domainname', u'recordid', u'type', u'host', u'ttl', u'data']
    def _glesysrecord2lexiconrecord(self, glesys_record):
        """Map one Glesys API record dict to the Lexicon record shape."""
        return {
            "id": glesys_record["recordid"],
            "type": glesys_record["type"],
            "name": glesys_record["host"],
            "ttl": glesys_record["ttl"],
            "content": glesys_record["data"],
        }
<|code_end|>
| lexicon/providers/glesys.py
<|code_start|>"""Module provider for Glesys"""
import json
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
NAMESERVER_DOMAINS = ["glesys.com"]
def provider_parser(subparser):
    """Add the Glesys credential arguments to *subparser*."""
    for flag, text in (
        ("--auth-username", "specify username (CL12345)"),
        ("--auth-token", "specify API key"),
    ):
        subparser.add_argument(flag, help=text)
class Provider(BaseProvider):
    """Provider class for Glesys.

    Uses the Glesys JSON API (https://api.glesys.com) with HTTP basic auth
    (username + API key).
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = "https://api.glesys.com"
    def _authenticate(self):
        """Check that the configured domain exists on this account."""
        payload = self._get("/domain/list")
        domains = payload["response"]["domains"]
        for record in domains:
            if record["domainname"] == self.domain:
                # Domain records do not have any id.
                # Since domain_id cannot be None, use domain name as id instead.
                self.domain_id = record["domainname"]
                break
        else:
            raise AuthenticationError("No domain found")
    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        """Create a record; no-op (returns True) if an identical record exists."""
        existing = self.list_records(rtype, name, content)
        if existing:
            # Already exists, do nothing.
            return True
        # NOTE: "host" is the name relative to the domain, per the
        # domain/addrecord API; _update_record uses the same convention.
        request_data = {
            "domainname": self.domain,
            "host": name,
            "type": rtype,
            "data": content,
        }
        self._addttl(request_data)
        self._post("/domain/addrecord", data=request_data)
        return True
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List records for the domain, filtered locally by rtype/name/content."""
        request_data = {"domainname": self.domain}
        payload = self._post("/domain/listrecords", data=request_data)
        # Convert from Glesys record structure to Lexicon structure.
        processed_records = [
            self._glesysrecord2lexiconrecord(r) for r in payload["response"]["records"]
        ]
        if rtype:
            processed_records = [
                record for record in processed_records if record["type"] == rtype
            ]
        if name:
            processed_records = [
                record
                for record in processed_records
                if record["name"] == self._full_name(name)
            ]
        if content:
            # Content comparison is case-insensitive.
            processed_records = [
                record
                for record in processed_records
                if record["content"].lower() == content.lower()
            ]
        return processed_records
    # Update a record. Identifier must be specified.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Update the record with the given identifier; only provided fields change."""
        request_data = {"recordid": identifier}
        if name:
            request_data["host"] = name
        if rtype:
            request_data["type"] = rtype
        if content:
            request_data["data"] = content
        self._addttl(request_data)
        self._post("/domain/updaterecord", data=request_data)
        return True
    # Delete an existing record.
    # If record does not exist, do nothing.
    # If an identifier is specified, use it, otherwise do a lookup using type, name and content.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete records by identifier or by rtype/name/content lookup; always True."""
        delete_record_id = []
        if not identifier:
            records = self._list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)
        for record_id in delete_record_id:
            request_data = {"recordid": record_id}
            self._post("/domain/deleterecord", data=request_data)
        return True
    # Helpers.
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Send one JSON request with basic auth; raises on HTTP errors."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        query_params["format"] = "json"
        default_headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        credentials = (
            self._get_provider_option("auth_username"),
            self._get_provider_option("auth_token"),
        )
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps(data),
            headers=default_headers,
            auth=credentials,
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()
    # Adds TTL parameter if passed as argument to lexicon.
    def _addttl(self, request_data):
        if self._get_lexicon_option("ttl"):
            request_data["ttl"] = self._get_lexicon_option("ttl")
    # From Glesys record structure: [u'domainname', u'recordid', u'type', u'host', u'ttl', u'data']
    def _glesysrecord2lexiconrecord(self, glesys_record):
        """Map one Glesys API record dict to the Lexicon record shape."""
        return {
            "id": glesys_record["recordid"],
            "type": glesys_record["type"],
            "name": glesys_record["host"],
            "ttl": glesys_record["ttl"],
            "content": glesys_record["data"],
        }
<|code_end|>
|
DuckDNS.org API support
DuckDNS.org is a popular dynamic DNS provider. It supports updating TXT records, which is used for issuing ACME certificates. Here is a sample: https://github.com/acmesh-official/acme.sh/blob/master/dnsapi/dns_duckdns.sh
Could you please support its API?
| lexicon/providers/duckdns.py
<|code_start|><|code_end|>
| lexicon/providers/duckdns.py
<|code_start|>"""\
Module provider for Duck DNS
The Duck DNS public API does not provide the whole set of features
supported by Lexicon: it only has one GET endpoint, "/update", authenticated by
the token which is given when you register with one of the supported
OAuth providers; the API only supports A, AAAA and TXT records, and
there is no other property that can be changed, like the TTL for these records;
the response message is plain text with "OK" or "KO" and only adds minimal
information when you supply the "verbose" parameter.
The DNS implementation supports only one record of each type
for each registered subdomain, and the value is propagated to every subdomain
after that, e.g. queries for testlexicon.duckdns.org and
example.given.testlexicon.duckdns.org return the same value.
Quirks of the DNS implementation:
* The A and AAAA records can be created separately, but when you delete one
the other is also deleted.
* When the A or AAAA records exist the TXT record is always present. Even if
you delete it will still be present with the value "". The implementation of
_list_records and _delete_record does not handle this special case.\
"""
import logging
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
try:
import dns.name
import dns.resolver
except ImportError:
pass
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["ca-central-1.compute.amazonaws.com"]
def provider_parser(subparser):
    """Add the Duck DNS authentication option to *subparser*."""
    subparser.add_argument(
        "--auth-token", help="specify the account token for authentication"
    )
class Provider(BaseProvider):
    """Provider class for Duck DNS.

    Writes go through the single GET /update endpoint; reads are done by
    actually resolving the domain over DNS (see _get_dns_record), since the
    API has no list endpoint.
    """
    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.api_url = "https://www.duckdns.org"
    @staticmethod
    def _get_duckdns_domain(domain):
        """Extract the subdomain registered with Duck DNS (e.g. "testlexicon")."""
        domain_dns = dns.name.from_text(domain)
        if len(domain_dns.labels) > 2 and b"duckdns" not in domain_dns.labels:
            raise Exception("{} is not a valid Duck DNS domain.".format(domain))
        if b"duckdns" in domain_dns.labels:
            # use only the third level domain, the one actually registered in Duck DNS
            domain_index = domain_dns.labels.index(b"duckdns") - 1
        else:
            # use the last name if it is relative
            domain_index = -2
        return domain_dns.labels[domain_index].decode("utf-8")
    @staticmethod
    def _get_duckdns_rtype_param(rtype):
        """Map a record type to the /update query parameter name (ip/ipv6/txt)."""
        if rtype not in ["A", "AAAA", "TXT"]:
            raise Exception("Duck DNS only supports A, AAAA and TXT records.")
        if rtype == "A":
            return "ip"
        elif rtype == "AAAA":
            return "ipv6"
        elif rtype == "TXT":
            return "txt"
    @staticmethod
    def _get_dns_record(name, rtype):
        """Resolve *name* over DNS and return it as a Lexicon record dict.

        Returns {} when the record does not exist. The record "id" is the
        rtype itself, since Duck DNS keeps at most one record per type.
        """
        if rtype not in ["A", "AAAA", "TXT"]:
            raise Exception("Duck DNS only supports A, AAAA and TXT records.")
        try:
            dns_rrset = Provider._get_dns_rrset(name, rtype)
        except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
            return {}
        ttl = dns_rrset.ttl
        if rtype == "A":
            content = dns_rrset[0].address
        elif rtype == "AAAA":
            content = dns_rrset[0].address
        else:
            content = dns_rrset[0].strings[0].decode("utf-8")
        record = {
            "id": rtype,
            "type": rtype,
            "name": name,
            "ttl": ttl,
            "content": content,
        }
        return record
    @staticmethod
    def _get_dns_rrset(name, rtype):
        """Perform the actual DNS lookup and return the answer rrset."""
        resolver = dns.resolver.Resolver()
        # NOTE(review): timeout (per-query) > lifetime (total) looks inverted;
        # dnspython caps each query by the remaining lifetime anyway — confirm intent.
        resolver.lifetime = 60
        resolver.timeout = 70
        return resolver.resolve(name, rtype).rrset
    def _authenticate(self):
        """Only checks that a token is configured; the API has no auth probe."""
        if self._get_provider_option("auth_token") is None:
            raise AuthenticationError("Must provide account token")
    # Create record. If the record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        """Set the single record of *rtype*; "auto" for A lets Duck DNS detect the IP."""
        if self._get_lexicon_option("ttl"):
            LOGGER.warning("create_record: Duck DNS does not support modifying the TTL, ignoring {}".format(self._get_lexicon_option("ttl")))
        if rtype not in ["A", "AAAA", "TXT"]:
            raise Exception("Duck DNS only supports A, AAAA and TXT records.")
        if rtype == "A" and content == "auto":
            # An empty "ip" parameter makes Duck DNS use the caller's address.
            duckdns_content = ""
        else:
            duckdns_content = content
        params = {
            "domains": Provider._get_duckdns_domain(self.domain),
            Provider._get_duckdns_rtype_param(rtype): duckdns_content
        }
        result = self._api_call(params)
        LOGGER.debug("create_record: %s", result)
        return "OK" in result
    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        """List the (at most three) records by resolving the registered subdomain."""
        duckdns_domain = Provider._get_duckdns_domain(self.domain)
        full_duckdns_domain = duckdns_domain + ".duckdns.org"
        records = [Provider._get_dns_record(full_duckdns_domain, rtype) for rtype in ["A", "AAAA", "TXT"]]
        processed_records = [
            {
                "id": record["id"],
                "type": record["type"],
                "name": self._full_name(name) if name is not None else record["name"],
                "ttl": record["ttl"],
                "content": record["content"],
            }
            for record in records
            if "id" in record
        ]
        filtered_records = [
            record
            for record in processed_records
            if (
                (rtype is None or record["type"] == rtype)
                and (name is None or record["name"] == self._full_name(name))
                and (content is None or record["content"] == content)
            )
        ]
        LOGGER.debug("list_records: %s", filtered_records)
        return filtered_records
    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        """Overwrite the record; *identifier* is the record type (see _get_dns_record)."""
        if self._get_lexicon_option("ttl"):
            LOGGER.warning("update_record: Duck DNS does not support modifying the TTL, ignoring {}".format(self._get_lexicon_option("ttl")))
        if not identifier:
            identifier = self._get_record_identifier(rtype=rtype, name=name)
        if rtype == "A" and content == "auto":
            duckdns_content = ""
        else:
            duckdns_content = content
        params = {
            "domains": Provider._get_duckdns_domain(self.domain),
            Provider._get_duckdns_rtype_param(identifier): duckdns_content
        }
        result = self._api_call(params)
        LOGGER.debug("update_record: %s", result)
        return "OK" in result
    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Clear the record of the identified type; True even when nothing matched."""
        if not identifier:
            identifier = self._get_record_identifier(
                rtype=rtype, name=name, content=content, delete=True
            )
        if identifier is None:
            LOGGER.debug("delete_record: no record found")
            return True
        params = {
            "domains": Provider._get_duckdns_domain(self.domain),
            Provider._get_duckdns_rtype_param(identifier): "",
            "clear": "true"
        }
        result = self._api_call(params)
        LOGGER.debug("delete_record: %s", result)
        return "OK" in result
    # Helpers
    def _api_call(self, params):
        """Call /update and return the plain-text body; raises when it contains "KO"."""
        response = self._request(query_params=dict(params.items()))
        content = response.content.decode("utf-8")
        if "KO" in content:
            raise Exception("KO: {}".format(params))
        return content
    def _full_name(self, record_name):
        """Qualify *record_name* with "<subdomain>.duckdns.org" if not already."""
        record_name = record_name.rstrip(".")
        full_domain = Provider._get_duckdns_domain(self.domain) + ".duckdns.org"
        if not record_name.endswith(full_domain):
            record_name = f"{record_name}.{full_domain}"
        return record_name
    def _get_record_identifier(self, rtype=None, name=None, content=None, delete=False):
        """Find the unique matching record id; None when deleting a missing record."""
        records = self._list_records(rtype=rtype, name=name, content=content)
        if len(records) == 1:
            return records[0]["id"]
        if delete and len(records) == 0:
            return None
        else:
            raise Exception("Unambiguous record could not be found.")
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Issue the GET /update call; action/url/data are ignored (single-endpoint API)."""
        if query_params is None:
            query_params = {}
        query_params["token"] = self._get_provider_option("auth_token")
        query_params["verbose"] = "true"
        response = requests.request(
            "GET",
            self.api_url + "/update",
            params=query_params,
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response
<|code_end|>
|
Misleading error message: ProviderNotAvailableError - This provider (plesk) has required dependencies that are missing. Please install lexicon[plesk] first.
```
(venv) mypc:~/myprojects/dns-lexicon$ pip freeze
certifi==2019.9.11
cffi==1.13.2
chardet==3.0.4
cryptography==2.8
dns-lexicon==3.3.10
future==0.18.2
idna==2.8
pycparser==2.19
pyOpenSSL==19.0.0
PyYAML==5.1.2
requests==2.22.0
requests-file==1.4.3
six==1.13.0
tldextract==2.2.2
urllib3==1.25.7
(venv) mypc:~/myprojects/dns-lexicon$ python main.py
Traceback (most recent call last):
File "main.py", line 111, in <module>
update_server_record(data[0], data[1])
File "main.py", line 62, in update_server_record
plesk_record = list_action(filter_name=server_name, filter_type='A')
File "main.py", line 58, in list_action
return lexicon.client.Client(config).execute()
File "/home/joe/myprojects/dns-lexicon/venv/lib/python3.6/site-packages/lexicon/client.py", line 36, in __init__
self._validate_config()
File "/home/joe/myprojects/dns-lexicon/venv/lib/python3.6/site-packages/lexicon/client.py", line 105, in _validate_config
'Please install lexicon[{0}] first.'.format(provider_name))
lexicon.client.ProviderNotAvailableError: This provider (plesk) has required dependencies that are missing. Please install lexicon[plesk] first.
```
Error is misleading because of this part:
> Please install **lexicon[plesk]** first.
it should be
> Please install **dns-lexicon[plesk]** first.
| lexicon/client.py
<|code_start|>"""Main module of Lexicon. Defines the Client class, that holds all Lexicon logic."""
import importlib
import logging
import os
from typing import Dict, List, Optional, Type, Union, cast
import tldextract # type: ignore
from lexicon import config as helper_config
from lexicon import discovery
from lexicon.exceptions import ProviderNotAvailableError
from lexicon.providers.base import Provider
class Client(object):
    """This is the Lexicon client, that will execute all the logic."""
    def __init__(
        self, config: Optional[Union[helper_config.ConfigResolver, Dict]] = None
    ):
        """Build a client from *config* (ConfigResolver, legacy dict, or None)."""
        if not config:
            # If there is not config specified, we load a non-interactive configuration.
            self.config = helper_config.non_interactive_config_resolver()
        elif not isinstance(config, helper_config.ConfigResolver):
            # If config is not a ConfigResolver, we are in a legacy situation.
            # We protect this part of the Client API.
            self.config = helper_config.legacy_config_resolver(config)
        else:
            self.config = config
        # Validate configuration
        self._validate_config()
        runtime_config = {}
        # Process domain, strip subdomain
        try:
            domain_extractor = tldextract.TLDExtract(
                cache_dir=_get_tldextract_cache_path(), include_psl_private_domains=True
            )
        except TypeError:
            # Fallback for old tldextract versions that use cache_file.
            domain_extractor = tldextract.TLDExtract(
                cache_file=_get_tldextract_cache_path(), include_psl_private_domains=True  # type: ignore
            )
        domain_parts = domain_extractor(
            cast(str, self.config.resolve("lexicon:domain"))
        )
        runtime_config["domain"] = f"{domain_parts.domain}.{domain_parts.suffix}"
        delegated = self.config.resolve("lexicon:delegated")
        if delegated:
            # handle delegated domain
            delegated = str(delegated).rstrip(".")
            initial_domain = str(runtime_config.get("domain"))
            if delegated != initial_domain:
                # convert to relative name
                if delegated.endswith(initial_domain):
                    delegated = delegated[: -len(initial_domain)]
                    delegated = delegated.rstrip(".")
                # update domain
                runtime_config["domain"] = f"{delegated}.{initial_domain}"
        self.action = self.config.resolve("lexicon:action")
        self.provider_name = self.config.resolve(
            "lexicon:provider_name"
        ) or self.config.resolve("lexicon:provider")
        if not self.provider_name:
            raise ValueError("Could not resolve provider name.")
        self.config.add_config_source(helper_config.DictConfigSource(runtime_config), 0)
        provider_module = importlib.import_module(
            "lexicon.providers." + self.provider_name
        )
        provider_class: Type[Provider] = getattr(provider_module, "Provider")
        self.provider = provider_class(self.config)
    def execute(self) -> Union[bool, List[Dict]]:
        """Execute provided configuration in class constructor to the DNS records"""
        self.provider.authenticate()
        identifier = self.config.resolve("lexicon:identifier")
        record_type = self.config.resolve("lexicon:type")
        name = self.config.resolve("lexicon:name")
        content = self.config.resolve("lexicon:content")
        if self.action == "create":
            if not record_type or not name or not content:
                raise ValueError("Missing record_type, name or content parameters.")
            return self.provider.create_record(record_type, name, content)
        if self.action == "list":
            return self.provider.list_records(record_type, name, content)
        if self.action == "update":
            return self.provider.update_record(identifier, record_type, name, content)
        if self.action == "delete":
            return self.provider.delete_record(identifier, record_type, name, content)
        raise ValueError(f"Invalid action statement: {self.action}")
    def _validate_config(self) -> None:
        """Check provider availability and that mandatory config entries are set."""
        provider_name = self.config.resolve("lexicon:provider_name")
        if not provider_name:
            raise AttributeError("provider_name")
        try:
            available = discovery.find_providers()[provider_name]
        except KeyError:
            raise ProviderNotAvailableError(
                f"This provider ({provider_name}) is not supported by Lexicon."
            )
        else:
            if not available:
                # BUGFIX: the PyPI distribution is named "dns-lexicon", not
                # "lexicon" — `pip install lexicon[...]` installs the wrong package.
                raise ProviderNotAvailableError(
                    f"This provider ({provider_name}) has required extra dependencies that are missing. "
                    f"Please run `pip install dns-lexicon[{provider_name}]` first before using it."
                )
        if not self.config.resolve("lexicon:action"):
            raise AttributeError("action")
        if not self.config.resolve("lexicon:domain"):
            raise AttributeError("domain")
        if not self.config.resolve("lexicon:type"):
            raise AttributeError("type")
def _get_tldextract_cache_path() -> str:
if os.environ.get("TLDEXTRACT_CACHE_FILE"):
logging.warning(
"TLD_EXTRACT_CACHE_FILE environment variable is deprecated, please use TLDEXTRACT_CACHE_PATH instead."
)
os.environ["TLDEXTRACT_CACHE_PATH"] = os.environ["TLDEXTRACT_CACHE_FILE"]
return os.path.expanduser(
os.environ.get("TLDEXTRACT_CACHE_PATH", os.path.join("~", ".lexicon_tld_set"))
)
<|code_end|>
lexicon/discovery.py
<|code_start|>"""
This module takes care of finding information about the runtime of Lexicon:
* what are the providers installed, and available
* what is the version of Lexicon
"""
import pkgutil
import re
from typing import Dict
try:
from importlib.metadata import Distribution, PackageNotFoundError
except ModuleNotFoundError:
from importlib_metadata import Distribution, PackageNotFoundError # type: ignore[assignment]
from lexicon import providers
def find_providers() -> Dict[str, bool]:
    """Map every provider module bundled with Lexicon to its availability."""
    names = {
        module_name
        for (_, module_name, _) in pkgutil.iter_modules(providers.__path__)  # type: ignore
        if module_name != "base"
    }
    ordered = sorted(names)
    try:
        distribution = Distribution.from_name("dns-lexicon")
    except PackageNotFoundError:
        # Not installed as a distribution: assume every provider is usable.
        return dict.fromkeys(ordered, True)
    return {name: _resolve_requirements(name, distribution) for name in ordered}
def lexicon_version() -> str:
    """Return the installed Lexicon version, or "unknown" outside a distribution."""
    try:
        distribution = Distribution.from_name("dns-lexicon")
    except PackageNotFoundError:
        return "unknown"
    return distribution.version
def _resolve_requirements(provider: str, distribution: Distribution) -> bool:
requires = distribution.requires
if requires is None:
raise ValueError("Error while trying finding requirements.")
requirements = [
re.sub(r"^(.*)\s\(.*\)(?:;.*|)$", r"\1", requirement)
for requirement in requires
if f'extra == "{provider}"' in requirement
]
if not requirements:
# No extra for this provider
return True
for requirement in requirements:
try:
Distribution.from_name(requirement)
except PackageNotFoundError:
# At least one extra requirement is not fulfilled
return False
# All extra requirements are fulfilled
return True
<|code_end|>
| lexicon/client.py
<|code_start|>"""Main module of Lexicon. Defines the Client class, that holds all Lexicon logic."""
import importlib
import logging
import os
from typing import Dict, List, Optional, Type, Union, cast
import tldextract # type: ignore
from lexicon import config as helper_config
from lexicon import discovery
from lexicon.exceptions import ProviderNotAvailableError
from lexicon.providers.base import Provider
class Client(object):
"""This is the Lexicon client, that will execute all the logic."""
def __init__(
self, config: Optional[Union[helper_config.ConfigResolver, Dict]] = None
):
if not config:
# If there is not config specified, we load a non-interactive configuration.
self.config = helper_config.non_interactive_config_resolver()
elif not isinstance(config, helper_config.ConfigResolver):
# If config is not a ConfigResolver, we are in a legacy situation.
# We protect this part of the Client API.
self.config = helper_config.legacy_config_resolver(config)
else:
self.config = config
# Validate configuration
self._validate_config()
runtime_config = {}
# Process domain, strip subdomain
try:
domain_extractor = tldextract.TLDExtract(
cache_dir=_get_tldextract_cache_path(), include_psl_private_domains=True
)
except TypeError:
domain_extractor = tldextract.TLDExtract(
cache_file=_get_tldextract_cache_path(), include_psl_private_domains=True # type: ignore
)
domain_parts = domain_extractor(
cast(str, self.config.resolve("lexicon:domain"))
)
runtime_config["domain"] = f"{domain_parts.domain}.{domain_parts.suffix}"
delegated = self.config.resolve("lexicon:delegated")
if delegated:
# handle delegated domain
delegated = str(delegated).rstrip(".")
initial_domain = str(runtime_config.get("domain"))
if delegated != initial_domain:
# convert to relative name
if delegated.endswith(initial_domain):
delegated = delegated[: -len(initial_domain)]
delegated = delegated.rstrip(".")
# update domain
runtime_config["domain"] = f"{delegated}.{initial_domain}"
self.action = self.config.resolve("lexicon:action")
self.provider_name = self.config.resolve(
"lexicon:provider_name"
) or self.config.resolve("lexicon:provider")
if not self.provider_name:
raise ValueError("Could not resolve provider name.")
self.config.add_config_source(helper_config.DictConfigSource(runtime_config), 0)
provider_module = importlib.import_module(
"lexicon.providers." + self.provider_name
)
provider_class: Type[Provider] = getattr(provider_module, "Provider")
self.provider = provider_class(self.config)
def execute(self) -> Union[bool, List[Dict]]:
    """Execute provided configuration in class constructor to the DNS records

    :returns: a list of record dicts for the "list" action, a success
        boolean for the other actions.
    :raises ValueError: if required parameters are missing or the action
        is unknown.
    """
    # Authenticate first so the provider can resolve the zone/domain id
    # used by all subsequent record operations.
    self.provider.authenticate()
    identifier = self.config.resolve("lexicon:identifier")
    record_type = self.config.resolve("lexicon:type")
    name = self.config.resolve("lexicon:name")
    content = self.config.resolve("lexicon:content")

    if self.action == "create":
        # "create" is the only action that mandates all three record fields.
        if not record_type or not name or not content:
            raise ValueError("Missing record_type, name or content parameters.")
        return self.provider.create_record(record_type, name, content)
    if self.action == "list":
        return self.provider.list_records(record_type, name, content)
    if self.action == "update":
        return self.provider.update_record(identifier, record_type, name, content)
    if self.action == "delete":
        return self.provider.delete_record(identifier, record_type, name, content)

    raise ValueError(f"Invalid action statement: {self.action}")
def _validate_config(self) -> None:
    """
    Check that the configuration holds everything needed to run: a known
    provider whose optional dependencies are installed, plus the mandatory
    "action", "domain" and "type" options.

    :raises AttributeError: if a mandatory option is missing (the attribute
        name is the missing option).
    :raises ProviderNotAvailableError: if the provider is unsupported or
        its extra dependencies are not installed.
    """
    provider_name = self.config.resolve("lexicon:provider_name")
    if not provider_name:
        raise AttributeError("provider_name")
    try:
        # find_providers() maps provider name -> availability boolean.
        available = discovery.find_providers()[provider_name]
    except KeyError:
        raise ProviderNotAvailableError(
            f"This provider ({provider_name}) is not supported by Lexicon."
        )
    else:
        if not available:
            raise ProviderNotAvailableError(
                f"This provider ({provider_name}) has required extra dependencies that are missing. "
                f"Please run `pip install dns-lexicon[{provider_name}]` first before using it."
            )

    if not self.config.resolve("lexicon:action"):
        raise AttributeError("action")
    if not self.config.resolve("lexicon:domain"):
        raise AttributeError("domain")
    if not self.config.resolve("lexicon:type"):
        raise AttributeError("type")
def _get_tldextract_cache_path() -> str:
if os.environ.get("TLDEXTRACT_CACHE_FILE"):
logging.warning(
"TLD_EXTRACT_CACHE_FILE environment variable is deprecated, please use TLDEXTRACT_CACHE_PATH instead."
)
os.environ["TLDEXTRACT_CACHE_PATH"] = os.environ["TLDEXTRACT_CACHE_FILE"]
return os.path.expanduser(
os.environ.get("TLDEXTRACT_CACHE_PATH", os.path.join("~", ".lexicon_tld_set"))
)
<|code_end|>
lexicon/discovery.py
<|code_start|>"""
This module takes care of finding information about the runtime of Lexicon:
* what are the providers installed, and available
* what is the version of Lexicon
"""
import pkgutil
import re
from typing import Dict
try:
from importlib.metadata import Distribution, PackageNotFoundError
except ModuleNotFoundError:
from importlib_metadata import Distribution, PackageNotFoundError # type: ignore[assignment]
from lexicon import providers
def find_providers() -> Dict[str, bool]:
    """Find all providers registered in Lexicon, and their availability

    :returns: a dict mapping each provider module name to True if its extra
        dependencies are satisfied (or cannot be checked), False otherwise.
    """
    # Every module inside the lexicon.providers package is a provider,
    # except "base" which holds the abstract Provider class.
    providers_list = sorted(
        {
            modname
            for (_, modname, _) in pkgutil.iter_modules(providers.__path__)  # type: ignore
            if modname != "base"
        }
    )

    try:
        distribution = Distribution.from_name("dns-lexicon")
    except PackageNotFoundError:
        # Lexicon is not installed as a distribution (e.g. running from a
        # source checkout): extras cannot be introspected, assume available.
        return {provider: True for provider in providers_list}
    else:
        return {
            provider: _resolve_requirements(provider, distribution)
            for provider in providers_list
        }
def lexicon_version() -> str:
    """Retrieve current Lexicon version"""
    try:
        distribution = Distribution.from_name("dns-lexicon")
    except PackageNotFoundError:
        # Not installed as a package (e.g. running from sources).
        return "unknown"
    return distribution.version
def _resolve_requirements(provider: str, distribution: Distribution) -> bool:
requires = distribution.requires
if requires is None:
raise ValueError("Error while trying finding requirements.")
requirements = [
re.sub(r"^(.*)\s\(.*\)(?:\s*;.*|)$", r"\1", requirement)
for requirement in requires
if f'extra == "{provider}"' in requirement
]
if not requirements:
# No extra for this provider
return True
for requirement in requirements:
try:
Distribution.from_name(requirement)
except PackageNotFoundError:
# At least one extra requirement is not fulfilled
return False
# All extra requirements are fulfilled
return True
<|code_end|>
|
Can't use lexicon with pending Cloudflare domains
I cannot use Lexicon with a domain that is `pending` (not `active`) in Cloudflare. It's useful to be able to manipulate DNS records for `pending` domains before changing nameservers to minimize disruption.
## Context
1. Add a domain (e.g., `example.com`) in Cloudflare.
2. Do not change the nameservers for `example.com` to point to Cloudflare so that it remains with a `pending` status.
3. Add an API token in Cloudflare with Zone.DNS Edit and Zone.Zone Read permissions.
## Example
```sh
$ lexicon --version
lexicon 3.12.0
$ lexicon cloudflare --auth-token abc...XYZ list example.com A
Traceback (most recent call last):
File "/home/user/.local/bin/lexicon", line 8, in <module>
sys.exit(main())
File "/home/user/.local/pipx/venvs/dns-lexicon/lib/python3.9/site-packages/lexicon/cli.py", line 132, in main
results = client.execute()
File "/home/user/.local/pipx/venvs/dns-lexicon/lib/python3.9/site-packages/lexicon/client.py", line 81, in execute
self.provider.authenticate()
File "/home/user/.local/pipx/venvs/dns-lexicon/lib/python3.9/site-packages/lexicon/providers/base.py", line 73, in authenticate
self._authenticate()
File "/home/user/.local/pipx/venvs/dns-lexicon/lib/python3.9/site-packages/lexicon/providers/cloudflare.py", line 51, in _authenticate
raise AuthenticationError("No domain found")
lexicon.exceptions.AuthenticationError: No domain found
```
| lexicon/providers/cloudflare.py
<|code_start|>"""Module provider for Cloudflare"""
import json
import logging
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["cloudflare.com"]
def provider_parser(subparser):
    """Configure the given subparser with the Cloudflare authentication flags.

    Fix: the description used to say "two ways" while listing three
    authentication methods; the count now matches the enumeration.
    """
    subparser.description = """
There are three ways to provide an authentication granting edition to the target CloudFlare DNS zone.
1 - A Global API key, with --auth-username and --auth-token flags.
2 - An unscoped API token (permissions Zone:Zone(read) + Zone:DNS(edit) for all zones), with --auth-token flag.
3 - A scoped API token (permissions Zone:Zone(read) + Zone:DNS(edit) for one zone), with --auth-token and --zone-id flags.
"""
    subparser.add_argument(
        "--auth-username",
        help="specify email address for authentication (for Global API key only)",
    )
    subparser.add_argument(
        "--auth-token",
        help="specify token for authentication (Global API key or API token)",
    )
    subparser.add_argument(
        "--zone-id",
        help="specify the zone id (if set, API token can be scoped to the target zone)",
    )
class Provider(BaseProvider):
    """Provider class for Cloudflare"""

    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Zone id of self.domain, resolved lazily in _authenticate().
        self.domain_id = None
        self.api_endpoint = "https://api.cloudflare.com/client/v4"

    def _authenticate(self):
        """Resolve and store the Cloudflare zone id for the configured domain.

        With --zone-id the zone is fetched directly (so a scoped token that
        cannot list zones still works); otherwise it is looked up by name.

        :raises AuthenticationError: if no zone or several zones match.
        """
        zone_id = self._get_provider_option("zone_id")
        if not zone_id:
            # NOTE(review): the "status": "active" filter excludes zones that
            # are still "pending" in Cloudflare, making those domains
            # unmanageable here — see the linked issue; confirm intent.
            payload = self._get("/zones", {"name": self.domain, "status": "active"})

            if not payload["result"]:
                raise AuthenticationError("No domain found")
            if len(payload["result"]) > 1:
                raise AuthenticationError(
                    "Too many domains found. This should not happen"
                )

            self.domain_id = payload["result"][0]["id"]
        else:
            payload = self._get(f"/zones/{zone_id}")

            if not payload["result"]:
                raise AuthenticationError(f"No domain found for Zone ID {zone_id}")

            self.domain_id = zone_id

    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        # Some record types (e.g. SSHFP) are sent as a structured "data"
        # object instead of a flat "content" string.
        content, cf_data = self._format_content(rtype, content)
        data = {
            "type": rtype,
            "name": self._full_name(name),
            "content": content,
            "data": cf_data,
        }
        if self._get_lexicon_option("ttl"):
            data["ttl"] = self._get_lexicon_option("ttl")

        payload = {"success": True}
        try:
            payload = self._post(f"/zones/{self.domain_id}/dns_records", data)
        except requests.exceptions.HTTPError as err:
            # Cloudflare error 81057 means "record already exists":
            # swallow it so the operation stays idempotent.
            already_exists = next(
                (
                    True
                    for error in err.response.json()["errors"]
                    if error["code"] == 81057
                ),
                False,
            )
            if not already_exists:
                raise

        LOGGER.debug("create_record: %s", payload["success"])
        return payload["success"]

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        filter_obj = {"per_page": 100}
        if rtype:
            filter_obj["type"] = rtype
        if name:
            filter_obj["name"] = self._full_name(name)
        if content:
            filter_obj["content"] = content

        records = []
        # Walk every result page reported by the API's result_info block.
        while True:
            payload = self._get(f"/zones/{self.domain_id}/dns_records", filter_obj)

            LOGGER.debug("payload: %s", payload)

            for record in payload["result"]:
                processed_record = {
                    "type": record["type"],
                    "name": record["name"],
                    "ttl": record["ttl"],
                    "content": record["content"],
                    "id": record["id"],
                }
                records.append(processed_record)

            pages = payload["result_info"]["total_pages"]
            page = payload["result_info"]["page"]
            if page >= pages:
                break
            filter_obj["page"] = page + 1

        LOGGER.debug("list_records: %s", records)
        LOGGER.debug("Number of records retrieved: %d", len(records))
        return records

    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        if identifier is None:
            # Without an identifier the (type, name) pair must match exactly
            # one existing record, otherwise the update is refused.
            records = self._list_records(rtype, name)
            if len(records) == 1:
                identifier = records[0]["id"]
            elif len(records) < 1:
                raise Exception(
                    "No records found matching type and name - won't update"
                )
            else:
                raise Exception(
                    "Multiple records found matching type and name - won't update"
                )

        data = {}
        if rtype:
            data["type"] = rtype
        if name:
            data["name"] = self._full_name(name)
        if content:
            data["content"] = content
        if self._get_lexicon_option("ttl"):
            data["ttl"] = self._get_lexicon_option("ttl")

        payload = self._put(f"/zones/{self.domain_id}/dns_records/{identifier}", data)

        LOGGER.debug("update_record: %s", payload["success"])
        return payload["success"]

    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        delete_record_id = []
        if not identifier:
            # No identifier: delete every record matching the given filters.
            records = self._list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)

        LOGGER.debug("delete_records: %s", delete_record_id)

        for record_id in delete_record_id:
            self._delete(f"/zones/{self.domain_id}/dns_records/{record_id}")

        LOGGER.debug("delete_record: %s", True)
        return True

    # Helpers
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Send an authenticated request to the Cloudflare API, return its JSON."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        headers = {"Content-Type": "application/json"}
        if self._get_provider_option("auth_username"):
            # Global API key authentication: email + key headers.
            headers["X-Auth-Email"] = self._get_provider_option("auth_username")
            headers["X-Auth-Key"] = self._get_provider_option("auth_token")
        else:
            # API token authentication: bearer token header.
            headers[
                "Authorization"
            ] = f"Bearer {self._get_provider_option('auth_token')}"
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps(data),
            headers=headers,
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()

    def _format_content(self, rtype, content):
        """
        Special case handling from some record types that Cloudflare needs
        formatted differently

        Returns new values for the content and data properties to be sent
        on the request
        """
        data = None
        if rtype == "SSHFP":
            # For some reason the CloudFlare API does not let you set content
            # directly when creating an SSHFP record. You need to pass the
            # fields that make up the record separately, then the API joins
            # them back together
            _fp = content.split(" ")
            data = {"algorithm": _fp[0], "type": _fp[1], "fingerprint": _fp[2]}
            content = None

        return content, data
<|code_end|>
| lexicon/providers/cloudflare.py
<|code_start|>"""Module provider for Cloudflare"""
import json
import logging
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["cloudflare.com"]
def provider_parser(subparser):
    """Configure the given subparser with the Cloudflare authentication flags.

    Fix: the description used to say "two ways" while listing three
    authentication methods; the count now matches the enumeration.
    """
    subparser.description = """
There are three ways to provide an authentication granting edition to the target CloudFlare DNS zone.
1 - A Global API key, with --auth-username and --auth-token flags.
2 - An unscoped API token (permissions Zone:Zone(read) + Zone:DNS(edit) for all zones), with --auth-token flag.
3 - A scoped API token (permissions Zone:Zone(read) + Zone:DNS(edit) for one zone), with --auth-token and --zone-id flags.
"""
    subparser.add_argument(
        "--auth-username",
        help="specify email address for authentication (for Global API key only)",
    )
    subparser.add_argument(
        "--auth-token",
        help="specify token for authentication (Global API key or API token)",
    )
    subparser.add_argument(
        "--zone-id",
        help="specify the zone id (if set, API token can be scoped to the target zone)",
    )
class Provider(BaseProvider):
    """Provider class for Cloudflare"""

    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Zone id of self.domain, resolved lazily in _authenticate().
        self.domain_id = None
        self.api_endpoint = "https://api.cloudflare.com/client/v4"

    def _authenticate(self):
        """Resolve and store the Cloudflare zone id for the configured domain.

        With --zone-id the zone is fetched directly (so a scoped token that
        cannot list zones still works); otherwise it is looked up by name.
        The lookup deliberately does not filter on zone status, so zones
        still "pending" in Cloudflare are found as well.

        :raises AuthenticationError: if no zone or several zones match.
        """
        zone_id = self._get_provider_option("zone_id")
        if not zone_id:
            payload = self._get("/zones", {"name": self.domain})

            if not payload["result"]:
                raise AuthenticationError("No domain found")
            if len(payload["result"]) > 1:
                raise AuthenticationError(
                    "Too many domains found. This should not happen"
                )

            self.domain_id = payload["result"][0]["id"]
        else:
            payload = self._get(f"/zones/{zone_id}")

            if not payload["result"]:
                raise AuthenticationError(f"No domain found for Zone ID {zone_id}")

            self.domain_id = zone_id

    # Create record. If record already exists with the same content, do nothing.
    def _create_record(self, rtype, name, content):
        # Some record types (e.g. SSHFP) are sent as a structured "data"
        # object instead of a flat "content" string.
        content, cf_data = self._format_content(rtype, content)
        data = {
            "type": rtype,
            "name": self._full_name(name),
            "content": content,
            "data": cf_data,
        }
        if self._get_lexicon_option("ttl"):
            data["ttl"] = self._get_lexicon_option("ttl")

        payload = {"success": True}
        try:
            payload = self._post(f"/zones/{self.domain_id}/dns_records", data)
        except requests.exceptions.HTTPError as err:
            # Cloudflare error 81057 means "record already exists":
            # swallow it so the operation stays idempotent.
            already_exists = next(
                (
                    True
                    for error in err.response.json()["errors"]
                    if error["code"] == 81057
                ),
                False,
            )
            if not already_exists:
                raise

        LOGGER.debug("create_record: %s", payload["success"])
        return payload["success"]

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def _list_records(self, rtype=None, name=None, content=None):
        filter_obj = {"per_page": 100}
        if rtype:
            filter_obj["type"] = rtype
        if name:
            filter_obj["name"] = self._full_name(name)
        if content:
            filter_obj["content"] = content

        records = []
        # Walk every result page reported by the API's result_info block.
        while True:
            payload = self._get(f"/zones/{self.domain_id}/dns_records", filter_obj)

            LOGGER.debug("payload: %s", payload)

            for record in payload["result"]:
                processed_record = {
                    "type": record["type"],
                    "name": record["name"],
                    "ttl": record["ttl"],
                    "content": record["content"],
                    "id": record["id"],
                }
                records.append(processed_record)

            pages = payload["result_info"]["total_pages"]
            page = payload["result_info"]["page"]
            if page >= pages:
                break
            filter_obj["page"] = page + 1

        LOGGER.debug("list_records: %s", records)
        LOGGER.debug("Number of records retrieved: %d", len(records))
        return records

    # Create or update a record.
    def _update_record(self, identifier, rtype=None, name=None, content=None):
        if identifier is None:
            # Without an identifier the (type, name) pair must match exactly
            # one existing record, otherwise the update is refused.
            records = self._list_records(rtype, name)
            if len(records) == 1:
                identifier = records[0]["id"]
            elif len(records) < 1:
                raise Exception(
                    "No records found matching type and name - won't update"
                )
            else:
                raise Exception(
                    "Multiple records found matching type and name - won't update"
                )

        data = {}
        if rtype:
            data["type"] = rtype
        if name:
            data["name"] = self._full_name(name)
        if content:
            data["content"] = content
        if self._get_lexicon_option("ttl"):
            data["ttl"] = self._get_lexicon_option("ttl")

        payload = self._put(f"/zones/{self.domain_id}/dns_records/{identifier}", data)

        LOGGER.debug("update_record: %s", payload["success"])
        return payload["success"]

    # Delete an existing record.
    # If record does not exist, do nothing.
    def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
        delete_record_id = []
        if not identifier:
            # No identifier: delete every record matching the given filters.
            records = self._list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)

        LOGGER.debug("delete_records: %s", delete_record_id)

        for record_id in delete_record_id:
            self._delete(f"/zones/{self.domain_id}/dns_records/{record_id}")

        LOGGER.debug("delete_record: %s", True)
        return True

    # Helpers
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Send an authenticated request to the Cloudflare API, return its JSON."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        headers = {"Content-Type": "application/json"}
        if self._get_provider_option("auth_username"):
            # Global API key authentication: email + key headers.
            headers["X-Auth-Email"] = self._get_provider_option("auth_username")
            headers["X-Auth-Key"] = self._get_provider_option("auth_token")
        else:
            # API token authentication: bearer token header.
            headers[
                "Authorization"
            ] = f"Bearer {self._get_provider_option('auth_token')}"
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps(data),
            headers=headers,
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()

    def _format_content(self, rtype, content):
        """
        Special case handling from some record types that Cloudflare needs
        formatted differently

        Returns new values for the content and data properties to be sent
        on the request
        """
        data = None
        if rtype == "SSHFP":
            # For some reason the CloudFlare API does not let you set content
            # directly when creating an SSHFP record. You need to pass the
            # fields that make up the record separately, then the API joins
            # them back together
            _fp = content.split(" ")
            data = {"algorithm": _fp[0], "type": _fp[1], "fingerprint": _fp[2]}
            content = None

        return content, data
<|code_end|>
|
Hover fails with "HTTPError: 400 Client Error: Bad Request for url"
Example command line with sensitive info replaced by "XXXXX":
lexicon hover --log_level DEBUG --auth-username "XXXXX" --auth-password "XXXXX" list XXXXX TXT
...outputs this:
Arguments: Namespace(delegated=None, config_dir='/root', provider_name='hover', action='list', domain='XXXXX', type='TXT', name=None, content=None, ttl=None, priority=None, identifier=None, log_level='DEBUG', output='TABLE', auth_username='XXXXX', auth_password='XXXXX')
Starting new HTTPS connection (1): www.hover.com:443
https://www.hover.com:443 "GET /signin HTTP/1.1" 200 3653
Starting new HTTPS connection (1): www.hover.com:443
https://www.hover.com:443 "POST /signin/auth.json HTTP/1.1" 400 None
Traceback (most recent call last):
File "/usr/lib/python-exec/python3.9/lexicon", line 33, in <module>
sys.exit(load_entry_point('dns-lexicon==3.3.4', 'console_scripts', 'lexicon')())
File "/usr/lib/python3.9/site-packages/lexicon/cli.py", line 117, in main
results = client.execute()
File "/usr/lib/python3.9/site-packages/lexicon/client.py", line 71, in execute
self.provider.authenticate()
File "/usr/lib/python3.9/site-packages/lexicon/providers/base.py", line 69, in authenticate
return self._authenticate()
File "/usr/lib/python3.9/site-packages/lexicon/providers/hover.py", line 42, in _authenticate
response.raise_for_status()
File "/usr/lib/python3.9/site-packages/requests/models.py", line 943, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://www.hover.com/signin/auth.json
Credentials provided confirmed to be working elsewhere. (e.g., works fine with this tool: https://github.com/dschanoeh/hover-ddns )
| src/lexicon/_private/providers/hover.py
<|code_start|>"""Module provider for Hover"""
import json
import logging
from argparse import ArgumentParser
from typing import List
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.interfaces import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
class Provider(BaseProvider):
    """Provider class for Hover"""

    @staticmethod
    def get_nameservers() -> List[str]:
        return ["hover.com"]

    @staticmethod
    def configure_parser(parser: ArgumentParser) -> None:
        parser.add_argument(
            "--auth-username", help="specify username for authentication"
        )
        parser.add_argument(
            "--auth-password", help="specify password for authentication"
        )

    def __init__(self, config):
        super(Provider, self).__init__(config)
        # Numeric id of self.domain, resolved during authenticate().
        self.domain_id = None
        self.api_endpoint = "https://www.hover.com/api"
        # Session cookies required by the (unofficial) Hover API.
        self.cookies = {}

    def authenticate(self):
        """Log in to Hover and resolve the id of the configured domain.

        :raises AuthenticationError: if the domain is not in the account.
        """
        # Getting required cookies "hover_session" and "hoverauth"
        response = requests.get("https://www.hover.com/signin")
        self.cookies["hover_session"] = response.cookies["hover_session"]

        payload = {
            "username": self._get_provider_option("auth_username"),
            "password": self._get_provider_option("auth_password"),
        }
        response = requests.post(
            "https://www.hover.com/api/login/", json=payload, cookies=self.cookies
        )
        response.raise_for_status()

        if "hoverauth" not in response.cookies:
            raise Exception("Unexpected auth response")
        self.cookies["hoverauth"] = response.cookies["hoverauth"]

        # Make sure domain exists
        # domain is stored in self.domain from BaseProvider
        domains = self._list_domains()
        for domain in domains:
            if domain["name"] == self.domain:
                self.domain_id = domain["id"]
                break
        else:
            raise AuthenticationError(f"Domain {self.domain} not found")

    def cleanup(self) -> None:
        pass

    def _list_domains(self):
        """Return the domains of the account as dicts with name/id/active keys."""
        response = self._get("/domains")

        domains = []
        for domain in response["domains"]:
            processed_domain = {
                "name": domain["domain_name"],
                "id": domain["id"],
                "active": (domain["status"] == "active"),
            }
            domains.append(processed_domain)

        LOGGER.debug("list_domains: %s", domains)
        return domains

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, rtype=None, name=None, content=None):
        payload = self._get(f"/domains/{self.domain_id}/dns")

        # payload['domains'] should be a list of len 1
        try:
            raw_records = payload["domains"][0]["entries"]
        except (KeyError, IndexError):
            raise Exception("Unexpected response")

        processed_records = []
        for record in raw_records:
            processed_record = {
                "type": record["type"],
                "name": self._full_name(record["name"]),
                "ttl": record["ttl"],
                "content": record["content"],
                "id": record["id"],
            }
            processed_records.append(processed_record)

        # All filtering happens client-side on the full record list.
        if rtype:
            processed_records = [
                record for record in processed_records if record["type"] == rtype
            ]
        if name:
            name = self._relative_name(name)
            processed_records = [
                record for record in processed_records if name in record["name"]
            ]
        if content:
            processed_records = [
                record
                for record in processed_records
                if record["content"].lower() == content.lower()
            ]

        LOGGER.debug("list_records: %s", processed_records)
        return processed_records

    def create_record(self, rtype, name, content):
        """Create a record; if an identical one already exists, do nothing."""
        name = self._relative_name(name)
        records = self.list_records(rtype, name, content)
        if records:
            LOGGER.debug("not creating duplicate record: %s", records[0])
            return True

        record = {"name": name, "type": rtype, "content": content}
        if self._get_lexicon_option("ttl"):
            record["ttl"] = self._get_lexicon_option("ttl")
        LOGGER.debug("create_record: %s", record)
        payload = self._post(f"/domains/{self.domain_id}/dns", record)
        return payload["succeeded"]

    # Update a record. Hover cannot update name so we delete and recreate.
    def update_record(self, identifier, rtype=None, name=None, content=None):
        if identifier:
            # Look the record up by id among all records.
            records = self.list_records()
            records = [r for r in records if r["id"] == identifier]
        else:
            # Otherwise the (type, name) pair must match exactly one record.
            records = self.list_records(rtype, name, None)
        if not records:
            raise Exception("Record not found")
        if len(records) > 1:
            raise Exception("Record not unique")
        orig_record = records[0]
        orig_id = orig_record["id"]

        # Fields not supplied keep their original values.
        new_rtype = rtype if rtype else orig_record["type"]
        new_name = name if name else orig_record["name"]
        new_content = content if content else orig_record["content"]

        self.delete_record(orig_id)
        return self.create_record(new_rtype, new_name, new_content)

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, rtype=None, name=None, content=None):
        delete_record_ids = []
        if not identifier:
            # No identifier: delete every record matching the given filters.
            records = self.list_records(rtype, name, content)
            delete_record_ids = [record["id"] for record in records]
        else:
            delete_record_ids.append(identifier)
        LOGGER.debug("delete_records: %s", delete_record_ids)

        for record_id in delete_record_ids:
            self._delete(f"/dns/{record_id}")
            LOGGER.debug("delete_record: %s", record_id)

        return True

    # Helpers
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Send a cookie-authenticated request to the Hover API, return its JSON."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps(data),
            cookies=self.cookies,
            headers={"Content-Type": "application/json"},
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        try:
            return response.json()
        except ValueError:  # response is not json
            raise Exception("Did not get JSON response.")
<|code_end|>
src/lexicon/interfaces.py
<|code_start|>"""Base provider module for all Lexicon providers"""
from __future__ import annotations
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from re import Pattern
from typing import Any
from lexicon.config import ConfigResolver, legacy_config_resolver
class Provider(ABC):
    """
    This is the abstract class for all lexicon Providers.
    It provides common functionality and ensures that all implemented
    Providers follow a standard ducktype.
    All standardized options will be provided here as defaults, but can be overwritten
    by environmental variables and cli arguments.

    Common options are:
    action
    domain
    type
    name
    content
    ttl
    priority
    identifier

    The provider_env_cli_options will also contain any Provider specific options:
    auth_username
    auth_token
    auth_password
    ...

    :param config: is a ConfigResolver object that contains all the options
    for this provider, merged from CLI and Env variables.
    """

    def __init__(self, config: ConfigResolver | dict[str, Any]):
        if not isinstance(config, ConfigResolver):
            # If config is a plain dict, we are in a legacy situation.
            # To protect the Provider API, the legacy dict is handled in a
            # correctly defined ConfigResolver.
            # Also, there may be some situation where `provider` key is not set in the config.
            # It should not happen when Lexicon is called from Client, as it will set itself
            # this key. However, there were no automated logic if the Provider is used directly.
            # So we provide this logic here.
            if not config.get("provider_name") and not config.get("provider"):
                config[
                    "provider_name"
                ] = __name__  # Obviously we use the module name itself.
            self.config = legacy_config_resolver(config)
        else:
            self.config = config

        # Default ttl
        self.config.with_dict({"ttl": 3600})

        # "provider_name" takes precedence over the legacy "provider" key.
        self.provider_name = self.config.resolve(
            "lexicon:provider_name"
        ) or self.config.resolve("lexicon:provider")
        self.domain = str(self.config.resolve("lexicon:domain"))
        # Provider-specific domain/zone id, set by authenticate() in subclasses.
        self.domain_id = None

    # Provider API: instance methods

    @abstractmethod
    def authenticate(self) -> None:
        """
        Authenticate against provider,
        Make any requests required to get the domain's id for this provider,
        so it can be used in subsequent calls.
        Should throw AuthenticationError or requests.HTTPError if authentication fails for any reason,
        of if the domain does not exist.
        """

    def cleanup(self) -> None:
        """
        Clean any relevant resource before this provider instance is closed.
        """

    @abstractmethod
    def create_record(self, rtype: str, name: str, content: str) -> bool:
        """
        Create record. If record already exists with the same content, do nothing.
        """

    @abstractmethod
    def list_records(
        self,
        rtype: str | None = None,
        name: str | None = None,
        content: str | None = None,
    ) -> list[dict[str, Any]]:
        """
        List all records. Return an empty list if no records found
        type, name and content are used to filter records.
        If possible filter during the query, otherwise filter after response is received.
        """

    @abstractmethod
    def update_record(
        self,
        identifier: str | None = None,
        rtype: str | None = None,
        name: str | None = None,
        content: str | None = None,
    ) -> bool:
        """
        Update a record. Identifier must be specified.
        """

    @abstractmethod
    def delete_record(
        self,
        identifier: str | None = None,
        rtype: str | None = None,
        name: str | None = None,
        content: str | None = None,
    ) -> bool:
        """
        Delete an existing record.
        If record does not exist, do nothing.
        If an identifier is specified, use it, otherwise do a lookup using type, name and content.
        """

    # Provider API: static methods

    @staticmethod
    @abstractmethod
    def get_nameservers() -> list[str] | list[Pattern]:
        """
        Return the list of nameservers for this DNS provider
        """

    @staticmethod
    @abstractmethod
    def configure_parser(parser: ArgumentParser) -> None:
        """
        Configure the given parser for the provider needs
        (e.g. specific CLI flags for auth)
        """

    # Helpers
    def _request(
        self,
        action: str = "GET",
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Execute an HTTP request against the DNS provider API"""
        # Not abstract on purpose: only providers relying on the
        # _get/_post/... helpers below need to implement it.
        raise NotImplementedError(
            "You must implement _request() to use _get()/_post()/_put()/_patch()/_delete() methods."
        )

    def _get(self, url: str = "/", query_params: dict[str, Any] | None = None) -> Any:
        """Shortcut for a GET request through _request()."""
        return self._request("GET", url, query_params=query_params)

    def _post(
        self,
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Shortcut for a POST request through _request()."""
        return self._request("POST", url, data=data, query_params=query_params)

    def _put(
        self,
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Shortcut for a PUT request through _request()."""
        return self._request("PUT", url, data=data, query_params=query_params)

    def _patch(
        self,
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Shortcut for a PATCH request through _request()."""
        return self._request("PATCH", url, data=data, query_params=query_params)

    def _delete(self, url: str = "/", query_params: dict[str, Any] | None = None) -> Any:
        """Shortcut for a DELETE request through _request()."""
        return self._request("DELETE", url, query_params=query_params)

    def _fqdn_name(self, record_name: str) -> str:
        """Return the record name as a fully qualified name with trailing dot."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if not record_name.endswith(self.domain):
            record_name = f"{record_name}.{self.domain}"
        return f"{record_name}."  # return the fqdn name

    def _full_name(self, record_name: str) -> str:
        """Return the record name including the domain, without trailing dot."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if not record_name.endswith(self.domain):
            record_name = f"{record_name}.{self.domain}"
        return record_name

    def _relative_name(self, record_name: str) -> str:
        """Return the record name relative to the domain (domain suffix removed)."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if record_name.endswith(self.domain):
            record_name = record_name[: -len(self.domain)]
            record_name = record_name.rstrip(".")
        return record_name

    def _clean_TXT_record(self, record: dict[str, Any]) -> dict[str, Any]:
        if record["type"] == "TXT":
            # Some providers have quotes around the TXT records,
            # so we're going to remove those extra quotes
            record["content"] = record["content"][1:-1]
        return record

    def _get_lexicon_option(self, option: str) -> str | None:
        """Resolve a generic lexicon option (action, domain, ttl, ...)."""
        return self.config.resolve(f"lexicon:{option}")

    def _get_provider_option(self, option: str) -> str | None:
        """Resolve a provider-specific option (auth_token, zone_id, ...)."""
        return self.config.resolve(f"lexicon:{self.provider_name}:{option}")
<|code_end|>
| src/lexicon/_private/providers/hover.py
<|code_start|>"""Module provider for Hover"""
import json
import logging
import re
from argparse import ArgumentParser
from typing import List
import pyotp
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.interfaces import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
class Provider(BaseProvider):
    """Provider class for Hover.

    Talks to the Hover control-panel API using session cookies obtained from a
    username/password login followed by a TOTP (2FA) challenge.
    """

    @staticmethod
    def get_nameservers() -> List[str]:
        """Return nameserver substrings used to recognize Hover-managed domains."""
        return ["hover.com"]

    @staticmethod
    def configure_parser(parser: ArgumentParser) -> None:
        """Register Hover-specific CLI options (credentials and TOTP secret)."""
        parser.add_argument(
            "--auth-username", help="specify username for authentication"
        )
        parser.add_argument(
            "--auth-password", help="specify password for authentication"
        )
        parser.add_argument(
            "--auth-totp-secret",
            help="specify base32-encoded shared secret to generate an OTP for authentication",
        )

    def __init__(self, config):
        """Set up the API endpoint, the cookie jar and the TOTP generator."""
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = "https://www.hover.com/api"
        self.cookies = {}
        # Strip all whitespace so secrets pasted with spaces still decode as base32.
        shared_secret = re.sub(r"\s*", "", self._get_provider_option("auth_totp_secret") or "")
        self.totp = pyotp.TOTP(shared_secret)

    def authenticate(self) -> None:
        """Log in to Hover (credentials + TOTP) and resolve ``self.domain_id``.

        Raises:
            AuthenticationError: If the configured domain is not in the account.
            requests.HTTPError: If any of the sign-in requests fail.
            Exception: If the 2FA response does not carry the ``hoverauth`` cookie.
        """
        # Getting required cookies "hover_session" and "hoverauth"
        response = requests.get("https://www.hover.com/signin")
        self.cookies["hover_session"] = response.cookies["hover_session"]

        # Part one, login credentials
        payload = {
            "username": self._get_provider_option("auth_username"),
            "password": self._get_provider_option("auth_password"),
        }
        response = requests.post(
            "https://www.hover.com/signin/auth.json", json=payload, cookies=self.cookies
        )
        response.raise_for_status()

        # Part two, 2fa
        payload = {"code": self.totp.now()}
        response = requests.post(
            "https://www.hover.com/signin/auth2.json",
            json=payload,
            cookies=self.cookies,
        )
        response.raise_for_status()
        if "hoverauth" not in response.cookies:
            raise Exception("Unexpected auth response")
        self.cookies["hoverauth"] = response.cookies["hoverauth"]

        # Make sure domain exists
        # domain is stored in self.domain from BaseProvider
        domains = self._list_domains()
        for domain in domains:
            if domain["name"] == self.domain:
                self.domain_id = domain["id"]
                break
        else:
            raise AuthenticationError(f"Domain {self.domain} not found")

    def cleanup(self) -> None:
        """No persistent resources to release for this provider."""
        pass

    def _list_domains(self):
        """Return the account's domains as dicts with ``name``/``id``/``active`` keys."""
        response = self._get("/domains")
        domains = []
        for domain in response["domains"]:
            processed_domain = {
                "name": domain["domain_name"],
                "id": domain["id"],
                "active": (domain["status"] == "active"),
            }
            domains.append(processed_domain)
        LOGGER.debug("list_domains: %s", domains)
        return domains

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, rtype=None, name=None, content=None):
        """List DNS records of the domain, filtered client-side by type/name/content."""
        payload = self._get(f"/control_panel/dns/{self.domain}")

        # payload["domain"]["dns"] holds the record list for the requested domain.
        try:
            raw_records = payload["domain"]["dns"]
        except (KeyError, IndexError):
            raise Exception("Unexpected response")

        processed_records = []
        for record in raw_records:
            processed_record = {
                "type": record["type"],
                "name": self._full_name(record["name"]),
                "ttl": record["ttl"],
                "content": record["content"],
                "id": record["id"],
            }
            processed_records.append(processed_record)
        if rtype:
            processed_records = [
                record for record in processed_records if record["type"] == rtype
            ]
        if name:
            # NOTE(review): this is a substring match on the relative name,
            # not an exact match — confirm that is intentional.
            name = self._relative_name(name)
            processed_records = [
                record for record in processed_records if name in record["name"]
            ]
        if content:
            # Case-insensitive exact content match.
            processed_records = [
                record
                for record in processed_records
                if record["content"].lower() == content.lower()
            ]

        LOGGER.debug("list_records: %s", processed_records)
        return processed_records

    def create_record(self, rtype, name, content):
        """Create a record; an already-existing identical record counts as success."""
        name = self._relative_name(name)
        records = self.list_records(rtype, name, content)
        if records:
            LOGGER.debug("not creating duplicate record: %s", records[0])
            return True
        record = {"name": name, "type": rtype, "content": content}
        # Hover expects the TTL as a string when one was configured.
        if self._get_lexicon_option("ttl"):
            record["ttl"] = str(self._get_lexicon_option("ttl"))
        LOGGER.debug("create_record: %s", record)
        payload = {"id": f"domain-{self.domain}", "dns_record": record}
        response = self._post("/control_panel/dns", payload)
        return response["succeeded"]

    # Update a record. Hover cannot update name so we delete and recreate.
    def update_record(self, identifier=None, rtype=None, name=None, content=None):
        """Update a record by deleting it and recreating it with the new values.

        Raises:
            Exception: If no record, or more than one record, matches the lookup.
        """
        if identifier:
            records = self.list_records()
            records = [r for r in records if r["id"] == identifier]
        else:
            records = self.list_records(rtype, name, None)
        if not records:
            raise Exception("Record not found")
        if len(records) > 1:
            raise Exception("Record not unique")
        orig_record = records[0]
        orig_id = orig_record["id"]
        # Fall back to the current values for any field not supplied.
        new_rtype = rtype if rtype else orig_record["type"]
        new_name = name if name else orig_record["name"]
        new_content = content if content else orig_record["content"]
        self.delete_record(orig_id)
        return self.create_record(new_rtype, new_name, new_content)

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, rtype=None, name=None, content=None):
        """Delete record(s) by identifier, or by type/name/content lookup."""
        delete_record_ids = []
        if not identifier:
            records = self.list_records(rtype, name, content)
            delete_record_ids = [record["id"] for record in records]
        else:
            delete_record_ids.append(identifier)
        LOGGER.debug("delete_records: %s", delete_record_ids)
        payload = {"domains": [{"id": f"domain-{self.domain}", "dns_records": delete_record_ids}]}
        self._request("DELETE", "/control_panel/dns", payload)
        return True

    # Helpers
    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Send a JSON request to the Hover API and return the decoded body.

        Raises:
            requests.HTTPError: On any non-2xx response.
            Exception: If the response body is not JSON.
        """
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=json.dumps(data),
            cookies=self.cookies,
            headers={"Content-Type": "application/json"},
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        try:
            return response.json()
        except ValueError:  # response is not json
            raise Exception("Did not get JSON response.")
<|code_end|>
src/lexicon/interfaces.py
<|code_start|>"""Base provider module for all Lexicon providers"""
from __future__ import annotations
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from re import Pattern
from typing import Any
from lexicon.config import ConfigResolver, legacy_config_resolver
class Provider(ABC):
    """
    This is the abstract class for all lexicon Providers.
    It provides common functionality and ensures that all implemented
    Providers follow a standard ducktype.
    All standardized options will be provided here as defaults, but can be overwritten
    by environmental variables and cli arguments.

    Common options are:

    action
    domain
    type
    name
    content
    ttl
    priority
    identifier

    The provider_env_cli_options will also contain any Provider specific options:

    auth_username
    auth_token
    auth_password
    ...

    :param config: is a ConfigResolver object that contains all the options
                   for this provider, merged from CLI and Env variables.
    """

    def __init__(self, config: ConfigResolver | dict[str, Any]):
        """Normalize the configuration and resolve provider name and domain."""
        if not isinstance(config, ConfigResolver):
            # If config is a plain dict, we are in a legacy situation.
            # To protect the Provider API, the legacy dict is handled in a
            # correctly defined ConfigResolver.
            # Also, there may be some situation where `provider` key is not set
            # in the config. It should not happen when Lexicon is called from
            # Client, as it will set itself this key. However, there were no
            # automated logic if the Provider is used directly.
            # So we provide this logic here.
            if not config.get("provider_name") and not config.get("provider"):
                config[
                    "provider_name"
                ] = __name__  # Obviously we use the module name itself.
            self.config = legacy_config_resolver(config)
        else:
            self.config = config

        # Default ttl
        self.config.with_dict({"ttl": 3600})

        self.provider_name = self.config.resolve(
            "lexicon:provider_name"
        ) or self.config.resolve("lexicon:provider")
        self.domain = str(self.config.resolve("lexicon:domain"))
        # Concrete providers are expected to fill this during authenticate().
        self.domain_id = None

    # Provider API: instance methods
    @abstractmethod
    def authenticate(self) -> None:
        """
        Authenticate against provider,
        Make any requests required to get the domain's id for this provider,
        so it can be used in subsequent calls.
        Should throw AuthenticationError or requests.HTTPError if authentication
        fails for any reason, or if the domain does not exist.
        """

    def cleanup(self) -> None:
        """
        Clean any relevant resource before this provider instance is closed.
        """

    @abstractmethod
    def create_record(self, rtype: str, name: str, content: str) -> bool:
        """
        Create record. If record already exists with the same content, do nothing.
        """

    @abstractmethod
    def list_records(
        self,
        rtype: str | None = None,
        name: str | None = None,
        content: str | None = None,
    ) -> list[dict[str, Any]]:
        """
        List all records. Return an empty list if no records found
        type, name and content are used to filter records.
        If possible filter during the query, otherwise filter after response is received.
        """

    @abstractmethod
    def update_record(
        self,
        identifier: str | None = None,
        rtype: str | None = None,
        name: str | None = None,
        content: str | None = None,
    ) -> bool:
        """
        Update a record. Identifier must be specified.
        """

    @abstractmethod
    def delete_record(
        self,
        identifier: str | None = None,
        rtype: str | None = None,
        name: str | None = None,
        content: str | None = None,
    ) -> bool:
        """
        Delete an existing record.
        If record does not exist, do nothing.
        If an identifier is specified, use it, otherwise do a lookup using type, name and content.
        """

    # Provider API: static methods
    @staticmethod
    @abstractmethod
    def get_nameservers() -> list[str] | list[Pattern]:
        """
        Return the list of nameservers for this DNS provider
        """

    @staticmethod
    @abstractmethod
    def configure_parser(parser: ArgumentParser) -> None:
        """
        Configure the given parser for the provider needs
        (e.g. specific CLI flags for auth)
        """

    # Helpers
    def _request(
        self,
        action: str = "GET",
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Execute an HTTP request against the DNS provider API"""
        # Default implementation fails loudly: subclasses that use the HTTP
        # helpers below must override this with a real transport.
        raise NotImplementedError(
            "You must implement _request() to use _get()/_post()/_put()/_patch()/_delete() methods."
        )

    def _get(self, url: str = "/", query_params: dict[str, Any] | None = None) -> Any:
        """Shortcut for an HTTP GET request through ``_request``."""
        return self._request("GET", url, query_params=query_params)

    def _post(
        self,
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Shortcut for an HTTP POST request through ``_request``."""
        return self._request("POST", url, data=data, query_params=query_params)

    def _put(
        self,
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Shortcut for an HTTP PUT request through ``_request``."""
        return self._request("PUT", url, data=data, query_params=query_params)

    def _patch(
        self,
        url: str = "/",
        data: dict[str, Any] | None = None,
        query_params: dict[str, Any] | None = None,
    ) -> Any:
        """Shortcut for an HTTP PATCH request through ``_request``."""
        return self._request("PATCH", url, data=data, query_params=query_params)

    def _delete(
        self, url: str = "/", query_params: dict[str, Any] | None = None
    ) -> Any:
        """Shortcut for an HTTP DELETE request through ``_request``."""
        return self._request("DELETE", url, query_params=query_params)

    def _fqdn_name(self, record_name: str) -> str:
        """Return *record_name* as a fully-qualified name with a trailing dot."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if not record_name.endswith(self.domain):
            record_name = f"{record_name}.{self.domain}"
        return f"{record_name}."  # return the fqdn name

    def _full_name(self, record_name: str) -> str:
        """Return *record_name* fully qualified, without a trailing dot."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if not record_name.endswith(self.domain):
            record_name = f"{record_name}.{self.domain}"
        return record_name

    def _relative_name(self, record_name: str) -> str:
        """Return *record_name* relative to the domain (suffix and dots removed)."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if record_name.endswith(self.domain):
            record_name = record_name[: -len(self.domain)]
            record_name = record_name.rstrip(".")
        return record_name

    def _clean_TXT_record(self, record: dict[str, Any]) -> dict[str, Any]:
        """Strip provider-added quotes around TXT record content, in place."""
        if record["type"] == "TXT":
            # Some providers have quotes around the TXT records,
            # so we're going to remove those extra quotes
            record["content"] = record["content"][1:-1]
        return record

    def _get_lexicon_option(self, option: str) -> str | None:
        """Resolve a global lexicon configuration option (e.g. ``ttl``)."""
        return self.config.resolve(f"lexicon:{option}")

    def _get_provider_option(self, option: str) -> str | None:
        """Resolve a provider-scoped configuration option (e.g. ``auth_token``)."""
        return self.config.resolve(f"lexicon:{self.provider_name}:{option}")
<|code_end|>
|
[Feature]: Input Convert as cmd tool
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Feature
Converting input files from:
1. `json` → `toml` or `yaml`
2. `toml` → `json` or `yaml`
3. `yaml` → `json` or `toml`
### Possible Solution
_No response_
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/plugins/__init__.py
<|code_start|><|code_end|>
spectrafit/plugins/converter.py
<|code_start|><|code_end|>
spectrafit/tools.py
<|code_start|>"""Collection of essential tools for running SpectraFit."""
import json
import sys
from pathlib import Path
from typing import Any
from typing import Dict
from typing import MutableMapping
from typing import Optional
from typing import Tuple
import numpy as np
import pandas as pd
import toml
import yaml
from lmfit import Minimizer
from lmfit import conf_interval
from lmfit.minimizer import MinimizerException
from spectrafit.models import calculated_model
from spectrafit.report import fit_report_as_dict
class PreProcessing:
    """Summarized all pre-processing-filters together."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize PreProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.df = df
        self.args = args

    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Apply all pre-processing-filters.

        Returns:
            pd.DataFrame: DataFrame containing the input data (`x` and `data`), which
                are optionally:

                1. shrinked
                2. shifted
                3. linear oversampled
                4. smoothed

            Dict[str,Any]: Adding a descriptive statistics to the input dictionary.
        """
        _df = self.df.copy()
        # Record descriptive statistics (decile steps) of the raw data
        # before any filter is applied.
        self.args["data_statistic"] = _df.describe(
            percentiles=np.arange(0.1, 1, 0.1)
        ).to_dict(orient="list")
        try:
            # Each filter is opt-in and only runs when its key is truthy.
            if self.args["energy_start"] or self.args["energy_stop"]:
                _df = self.energy_range(_df, self.args)
            if self.args["shift"]:
                _df = self.energy_shift(_df, self.args)
            if self.args["oversampling"]:
                _df = self.oversampling(_df, self.args)
            if self.args["smooth"]:
                _df = self.intensity_smooth(_df, self.args)
        except KeyError as exc:
            # A misnamed column in `args` is fatal for the whole run.
            print(f"KeyError: {exc} is not part of the dataframe!")
            sys.exit(1)
        return (_df, self.args)

    @staticmethod
    def energy_range(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Select the energy range for fitting.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are shrinked according to the energy range.
        """
        _e0 = args["energy_start"]
        _e1 = args["energy_stop"]
        _df = df.copy()
        if isinstance(_e0, (int, float)) and isinstance(_e1, (int, float)):
            return _df.loc[
                (df[args["column"][0]] >= _e0) & (df[args["column"][0]] <= _e1)
            ]
        elif isinstance(_e0, (int, float)):
            return _df.loc[df[args["column"][0]] >= _e0]
        elif isinstance(_e1, (int, float)):
            return _df.loc[df[args["column"][0]] <= _e1]
        # NOTE(review): implicitly returns None when neither bound is numeric;
        # __call__ only invokes this when at least one bound is set — confirm.

    @staticmethod
    def energy_shift(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Shift the energy axis by a given value.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are energy-shifted by the given value.
        """
        _df = df.copy()
        _df.loc[:, args["column"][0]] = df[args["column"][0]].values + args["shift"]
        return _df

    @staticmethod
    def oversampling(df: pd.DataFrame, args: dict) -> pd.DataFrame:
        """Oversampling the data to increase the resolution of the data.

        !!! note "About Oversampling"

            In this implementation of oversampling, the data is oversampled by the
            factor of 5. In case of data with only a few points, the increased
            resolution should allow to easier solve the optimization problem. The
            oversampling based on a simple linear regression.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are oversampled by the factor of 5.
        """
        # Evenly spaced grid with 5x the original number of points.
        x_values = np.linspace(
            df[args["column"][0]].min(),
            df[args["column"][0]].max(),
            5 * df.shape[0],
        )
        return pd.DataFrame(
            {
                args["column"][0]: x_values,
                # Linear interpolation of the intensities onto the denser grid.
                args["column"][1]: np.interp(
                    x_values,
                    df[args["column"][0]].values,
                    df[args["column"][1]].values,
                ),
            }
        )

    @staticmethod
    def intensity_smooth(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Smooth the intensity values.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are smoothed by the given value.
        """
        # Moving-average kernel of width `smooth`, normalized to preserve area.
        box = np.ones(args["smooth"]) / args["smooth"]
        _df = df.copy()
        _df.loc[:, args["column"][1]] = np.convolve(
            df[args["column"][1]].values, box, mode="same"
        )
        return _df
class PostProcessing:
    """Post-processing of the dataframe."""

    def __init__(
        self, df: pd.DataFrame, args: Dict[str, Any], minimizer: Minimizer, result: Any
    ) -> None:
        """Initialize PostProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
            minimizer (Minimizer): The minimizer class.
            result (Any): The result of the minimization of the best fit.
        """
        self.args = args
        self.df = self.rename_columns(df=df)
        self.minimizer = minimizer
        self.result = result
        self.data_size = self.check_global_fitting()

    def check_global_fitting(self) -> Optional[int]:
        """Check if the global fitting is performed.

        !!! note "About Global Fitting"

            In case of the global fitting, the data is extended by the single
            contribution of the model.

        Returns:
            Optional[int]: The number of spectra of the global fitting.
        """
        if self.args["global"]:
            # Parameter names end in their spectrum index, so the largest
            # trailing index equals the number of spectra.
            return max(
                int(self.result.params[i].name.split("_")[-1])
                for i in self.result.params
            )
        return None

    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Call the post-processing."""
        # These properties are evaluated for their side effects; the order
        # matters because later steps consume columns added by earlier ones.
        self.make_insight_report
        self.make_residual_fit
        self.make_fit_contributions
        self.export_correlation2args
        self.export_results2args
        return (self.df, self.args)

    def rename_columns(self, df: pd.DataFrame) -> pd.DataFrame:
        """Rename the columns of the dataframe.

        Rename the columns of the dataframe to the names defined in the input file.

        Args:
            df (pd.DataFrame): DataFrame containing the original input data, which are
                individually pre-named.

        Returns:
            pd.DataFrame: DataFrame containing renamed columns. All column-names are
                lowered. In case of a regular fitting, the columns are named `energy`
                and `intensity`. In case of a global fitting, `energy` stays `energy`
                and `intensity` is extended by a `_` and column index; like: `energy`
                and `intensity_1`, `intensity_2`, `intensity_...` depending on
                the dataset size.
        """
        if self.args["global"]:
            return df.rename(
                columns={
                    col: "energy" if i == 0 else f"intensity_{i}"
                    for i, col in enumerate(df.columns)
                }
            )
        return df.rename(columns={df.columns[0]: "energy", df.columns[1]: "intensity"})

    @property
    def make_insight_report(self) -> None:
        """Make an insight-report of the fit statistic.

        !!! note "About Insight Report"

            The insight report based on:

            1. Configurations
            2. Statistics
            3. Variables
            4. Errorbars
            5. Correlations
            6. Covariance Matrix
            7. _Optional_: Confidence Interval

            All of the above are included in the report as dictionary in `args`.
        """
        self.args["fit_insights"] = fit_report_as_dict(
            self.result, modelpars=self.result.params
        )
        if self.args["conf_interval"]:
            try:
                self.args["confidence_interval"] = conf_interval(
                    self.minimizer, self.result, **self.args["conf_interval"]
                )
            except MinimizerException as exc:
                # Best-effort: a failed CI calculation must not abort the run.
                print(f"Error: {exc} -> No confidence interval could be calculated!")
                self.args["confidence_interval"] = None

    @property
    def make_residual_fit(self) -> None:
        r"""Make the residuals of the model and the fit.

        !!! note "About Residual and Fit"

            The residual is calculated by the difference of the best fit `model` and
            the reference `data`. In case of a global fitting, the residuals are
            calculated for each `spectra` separately plus an avaraged global residual.

            $$
            \mathrm{residual} = \mathrm{model} - \mathrm{data}
            $$

            $$
            \mathrm{residual}_{i} = \mathrm{model}_{i} - \mathrm{data}_{i}
            $$

            $$
            \mathrm{residual}_{avg} = \frac{ \sum_{i}
            \mathrm{model}_{i} - \mathrm{data}_{i}}{i}
            $$

            The fit is defined by the difference sum of fit and reference data. In case
            of a global fitting, the residuals are calculated for each `spectra`
            separately.
        """
        _df = self.df.copy()
        if self.args["global"]:
            # Reshape the flat residual into one row per spectrum (after .T).
            residual = self.result.residual.reshape((-1, self.data_size)).T
            for i, res in enumerate(residual, start=1):
                _df[f"residual_{i}"] = res
                _df[f"fit_{i}"] = self.df[f"intensity_{i}"].values + res
            _df["residual_avg"] = np.mean(residual, axis=0)
        else:
            residual = self.result.residual
            _df["residual"] = residual
            _df["fit"] = self.df["intensity"].values + residual
        self.df = _df

    @property
    def make_fit_contributions(self) -> None:
        """Make the fit contributions of the best fit model.

        !!! info "About Fit Contributions"

            The fit contributions are made independently of the local or global fitting.
        """
        self.df = calculated_model(
            params=self.result.params,
            x=self.df.iloc[:, 0].values,
            df=self.df,
            global_fit=self.args["global"],
        )

    @property
    def export_correlation2args(self) -> None:
        """Export the correlation matrix to the input file arguments.

        !!! note "About Correlation Matrix"

            The correlation matrix is calculated from and for the pandas dataframe and
            divided into two parts:

            1. Linear correlation matrix
            2. Non-linear correlation matrix (coming later ...)
        """
        self.args["linear_correlation"] = self.df.corr().to_dict(orient="list")

    @property
    def export_results2args(self) -> None:
        """Export the results of the fit to the input file arguments."""
        self.args["fit_result"] = self.df.to_dict(orient="list")
class SaveResult:
    """Saving the result of the fitting process."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize SaveResult class.

        !!! note "About SaveResult"

            The SaveResult class is responsible for saving the results of the
            optimization process. The results are saved in the following formats:

            1. JSON (default) for all results and meta data of the fitting process.
            2. CSV for the results of the optimization process.

        !!! note "About the output `CSV`-file"

            The output files are seperated into three classes:

            1. The `results` of the optimization process.
            2. The `correlation analysis` of the optimization process.
            3. The `error analysis` of the optimization process.

            The result outputfile contains the following information:

            1. The column names of the energy axis (`x`) and the intensity values
               (`data`)
            2. The name of the column containing the energy axis (`x`)
            3. The name of the column containing the intensity values (`data`)
            4. The name of the column containing the best fit (`best_fit`)
            5. The name of the column containing the residuum (`residuum`)
            6. The name of the column containing the model contribution (`model`)
            7. The name of the column containing the error of the model
               contribution (`model_error`)
            8. The name of the column containing the error of the best fit
               (`best_fit_error`)
            9. The name of the column containing the error of the residuum
               (`residuum_error`)

            The `correlation analysis` file contains the following information about all
            attributes of the model:

            1. Energy
            2. Intensity or Intensities (global fitting)
            3. Residuum
            4. Best fit
            5. Model contribution(s)

            The `error analysis` file contains the following information about all model
            attributes vs:

            1. Initial model values
            2. Current model values
            3. Best model values
            4. Residuum / error relative to the best fit
            5. Residuum / error relative to the absolute fit

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.df = df
        self.args = args

    def __call__(self) -> None:
        """Call the SaveResult class."""
        # `save_as_json` runs first: it raises when no output file is
        # configured, so the CSV writers never emit files with a bogus prefix.
        self.save_as_json
        self.save_as_csv

    @property
    def save_as_csv(self) -> None:
        """Save the fit results to csv files.

        !!! note "About saving the fit results"

            The fit results are saved to csv files and are divided into three different
            categories:

            1. The `results` of the optimization process.
            2. The `correlation analysis` of the optimization process.
            3. The `error analysis` of the optimization process.
        """
        self.df.to_csv(Path(f"{self.args['outfile']}_fit.csv"), index=False)
        pd.DataFrame.from_dict(self.args["linear_correlation"]).to_csv(
            Path(f"{self.args['outfile']}_correlation.csv"),
            index=True,
            index_label="attributes",
        )
        pd.DataFrame.from_dict(self.args["fit_insights"]["variables"]).to_csv(
            Path(f"{self.args['outfile']}_errors.csv"),
            index=True,
            index_label="attributes",
        )

    @property
    def save_as_json(self) -> None:
        """Save the fitting result as json file.

        Raises:
            FileNotFoundError: If no output file is defined in `args`.
        """
        if self.args["outfile"]:
            with open(Path(f"{self.args['outfile']}_summary.json"), "w") as f:
                json.dump(self.args, f, indent=4)
        else:
            raise FileNotFoundError("No output file provided!")
def read_input_file(fname: str) -> MutableMapping[str, Any]:
    """Read the input file.

    Read the input file as `toml`, `json`, or `yaml` files and return as a dictionary.

    Args:
        fname (str): Name of the input file.

    Raises:
        OSError: If the input file is not supported.

    Returns:
        dict: Return the input file arguments as a dictionary with additional
             information beyond the command line arguments.
    """
    _fname = Path(fname)
    if _fname.suffix == ".toml":
        args = toml.load(fname)
    elif _fname.suffix == ".json":
        # Explicit UTF-8 avoids platform-dependent default encodings
        # when reading configuration files.
        with open(_fname, encoding="utf-8") as f:
            args = json.load(f)
    elif _fname.suffix in [".yaml", ".yml"]:
        with open(_fname, encoding="utf-8") as f:
            args = yaml.load(f, Loader=yaml.FullLoader)
    else:
        raise OSError(
            f"ERROR: Input file {fname} has not supported file format.\n"
            "Supported fileformats are: '*.json', '*.yaml', and '*.toml'"
        )
    return args
def load_data(args: Dict[str, str]) -> pd.DataFrame:
    """Load the data from a txt file.

    !!! note "About the data format"

        Load data from a txt file, which can be an ASCII file as txt, csv, or
        user-specific but rational file. The file can be separated by a delimiter.
        In case of 2d data, the columns has to be defined. In case of 3D data, all
        columns are considered as data.

    Args:
        args (Dict[str,str]): The input file arguments as a dictionary with additional
             information beyond the command line arguments.

    Returns:
        pd.DataFrame: DataFrame containing the input data (`x` and `data`),
             as well as the best fit and the corresponding residuum. Hence, it will be
             extended by the single contribution of the model.
    """
    # Keyword arguments shared by the global and the 2D (column-selected) case.
    read_kwargs = {
        "sep": args["separator"],
        "header": args["header"],
        "dtype": np.float64,
        "decimal": args["decimal"],
        "comment": args["comment"],
    }
    if not args["global"]:
        # For a regular (non-global) fit only the requested columns are read.
        read_kwargs["usecols"] = args["column"]
    try:
        return pd.read_csv(Path(args["infile"]), **read_kwargs)
    except ValueError as exc:
        print(f"Error: {exc} -> Dataframe contains non numeric data!")
        sys.exit(1)
def check_keywords_consistency(
    check_args: MutableMapping[str, Any], ref_args: Dict[str, Any]
) -> None:
    """Check if the keywords are consistent.

    Check if the keywords are consistent between two dictionaries. The two dictionaries
    are reference keywords of the `cmd_line_args` and the `args` of the `input_file`.

    Args:
        check_args (MutableMapping[str, Any]): First dictionary to be checked.
        ref_args (Dict[str,Any]): Second dictionary to be checked.

    Raises:
        KeyError: If the keywords are not consistent.
    """
    for key in check_args:
        # Membership test directly on the mapping; `.keys()` was redundant.
        if key not in ref_args:
            raise KeyError(f"ERROR: The {key} is not parameter of the `cmd-input`!")
<|code_end|>
| spectrafit/plugins/__init__.py
<|code_start|>"""Plugins for SpectraFit."""
<|code_end|>
spectrafit/plugins/converter.py
<|code_start|>"""Convert the input and output files to the preferred file format."""
import argparse
import json
from pathlib import Path
from typing import Any
from typing import Dict
import toml
import yaml
from spectrafit.tools import read_input_file
# File formats accepted by the converter CLI ("lock" files are written as TOML).
choices = ["json", "yaml", "yml", "toml", "lock"]
def get_args() -> Dict[str, Any]:
    """Collect the command-line arguments for the converter.

    Returns:
        Dict[str, Any]: Return the input file arguments as a dictionary without
             additional information beyond the command line arguments.
    """
    cli = argparse.ArgumentParser(
        description="Converter for 'SpectraFit' input and output files."
    )
    cli.add_argument(
        "infile",
        type=Path,
        help="Filename of the 'SpectraFit' input or output file.",
    )
    cli.add_argument(
        "-f",
        "--format",
        type=str,
        choices=choices,
        help="File format for the conversion.",
    )
    return vars(cli.parse_args())
def convert(args: Dict[str, Any]) -> None:
    """Convert the input file to the output file.

    Args:
        args (Dict[str, Any]): The input file arguments as a dictionary with
             additional information beyond the command line arguments.

    Raises:
        ValueError: If the input file format is identical with the output format.
        ValueError: If the output file format is not supported.
    """
    if args["infile"].suffix[1:] == args["format"]:
        raise ValueError(
            f"The input file suffix '{args['infile'].suffix[1:]}' is similar to the"
            f" output file format '{args['format']}'. "
            "Please use a different output file suffix."
        )
    if args["format"] not in choices:
        raise ValueError(f"The output file format '{args['format']}' is not supported.")

    data = read_input_file(args["infile"])

    if args["format"] == "json":
        with open(args["infile"].with_suffix(".json"), "w", encoding="utf8") as f:
            json.dump(data, f, indent=4)
    elif args["format"] in ("yaml", "yml"):
        # BUG FIX: "yml" was accepted by the CLI choices but previously matched
        # no branch, so the conversion silently produced no output file.
        with open(
            args["infile"].with_suffix(f".{args['format']}"), "w", encoding="utf8"
        ) as f:
            yaml.dump(data, f, default_flow_style=False)
    elif args["format"] in ("toml", "lock"):
        # Both "toml" and "lock" targets are serialized as TOML.
        with open(args["infile"].with_suffix(".toml"), "w", encoding="utf8") as f:
            toml.dump(data, f)
def command_line_runner() -> None:
    """Run the converter via cmd commands.

    Entry point: parses the CLI arguments and performs the conversion.
    """
    convert(get_args())
<|code_end|>
spectrafit/tools.py
<|code_start|>"""Collection of essential tools for running SpectraFit."""
import json
import sys
from pathlib import Path
from typing import Any
from typing import Dict
from typing import MutableMapping
from typing import Optional
from typing import Tuple
import numpy as np
import pandas as pd
import toml
import yaml
from lmfit import Minimizer
from lmfit import conf_interval
from lmfit.minimizer import MinimizerException
from spectrafit.models import calculated_model
from spectrafit.report import fit_report_as_dict
class PreProcessing:
    """Summarized all pre-processing-filters together."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize PreProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.df = df
        self.args = args

    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Apply all pre-processing-filters.

        Returns:
            pd.DataFrame: DataFrame containing the input data (`x` and `data`), which
                are optionally:

                1. shrunk to an energy range
                2. energy-shifted
                3. linearly oversampled
                4. smoothed
            Dict[str,Any]: Adding a descriptive statistics to the input dictionary.
        """
        _df = self.df.copy()
        self.args["data_statistic"] = _df.describe(
            percentiles=np.arange(0.1, 1, 0.1)
        ).to_dict(orient="list")
        try:
            # Use `is not None` so that a legitimate bound of 0.0 is applied;
            # plain truthiness would silently skip it.
            if (
                self.args["energy_start"] is not None
                or self.args["energy_stop"] is not None
            ):
                _df = self.energy_range(_df, self.args)
            if self.args["shift"]:
                _df = self.energy_shift(_df, self.args)
            if self.args["oversampling"]:
                _df = self.oversampling(_df, self.args)
            if self.args["smooth"]:
                _df = self.intensity_smooth(_df, self.args)
        except KeyError as exc:
            # Keep the file's established CLI-style error handling.
            print(f"KeyError: {exc} is not part of the dataframe!")
            sys.exit(1)
        return (_df, self.args)

    @staticmethod
    def energy_range(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Select the energy range for fitting.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are shrunk according to the energy range.
                If neither bound is numeric, the data are returned unchanged.
        """
        _e0 = args["energy_start"]
        _e1 = args["energy_stop"]
        _df = df.copy()
        if isinstance(_e0, (int, float)) and isinstance(_e1, (int, float)):
            return _df.loc[
                (df[args["column"][0]] >= _e0) & (df[args["column"][0]] <= _e1)
            ]
        if isinstance(_e0, (int, float)):
            return _df.loc[df[args["column"][0]] >= _e0]
        if isinstance(_e1, (int, float)):
            return _df.loc[df[args["column"][0]] <= _e1]
        # Neither bound is numeric: return the data unchanged instead of the
        # implicit ``None`` the previous fall-through produced.
        return _df

    @staticmethod
    def energy_shift(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Shift the energy axis by a given value.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are energy-shifted by the given value.
        """
        _df = df.copy()
        _df.loc[:, args["column"][0]] = df[args["column"][0]].values + args["shift"]
        return _df

    @staticmethod
    def oversampling(df: pd.DataFrame, args: dict) -> pd.DataFrame:
        """Oversampling the data to increase the resolution of the data.

        !!! note "About Oversampling"

            In this implementation of oversampling, the data is oversampled by the
            factor of 5. In case of data with only a few points, the increased
            resolution should allow to easier solve the optimization problem. The
            oversampling based on a simple linear regression.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are oversampled by the factor of 5.
        """
        x_values = np.linspace(
            df[args["column"][0]].min(),
            df[args["column"][0]].max(),
            5 * df.shape[0],
        )
        # Linear interpolation of the intensity onto the denser energy grid.
        return pd.DataFrame(
            {
                args["column"][0]: x_values,
                args["column"][1]: np.interp(
                    x_values,
                    df[args["column"][0]].values,
                    df[args["column"][1]].values,
                ),
            }
        )

    @staticmethod
    def intensity_smooth(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Smooth the intensity values.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are smoothed by a boxcar of the given width.
        """
        box = np.ones(args["smooth"]) / args["smooth"]
        _df = df.copy()
        _df.loc[:, args["column"][1]] = np.convolve(
            df[args["column"][1]].values, box, mode="same"
        )
        return _df
class PostProcessing:
    """Post-processing of the dataframe.

    NOTE(review): the `make_*` and `export_*` members below are declared as
    properties but are evaluated purely for their side effects on ``self.df``
    and ``self.args``; ``__call__`` depends on their exact evaluation order.
    """

    def __init__(
        self, df: pd.DataFrame, args: Dict[str, Any], minimizer: Minimizer, result: Any
    ) -> None:
        """Initialize PostProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
            minimizer (Minimizer): The minimizer class.
            result (Any): The result of the minimization of the best fit.
        """
        self.args = args
        # Columns are normalized to `energy`/`intensity...` before anything else.
        self.df = self.rename_columns(df=df)
        self.minimizer = minimizer
        self.result = result
        # Number of spectra for a global fit, None for a regular fit.
        self.data_size = self.check_global_fitting()

    def check_global_fitting(self) -> Optional[int]:
        """Check if the global fitting is performed.

        !!! note "About Global Fitting"

            In case of the global fitting, the data is extended by the single
            contribution of the model.

        Returns:
            Optional[int]: The number of spectra of the global fitting, derived
                from the largest `_<index>` suffix of the parameter names;
                `None` for a regular (non-global) fit.
        """
        if self.args["global"]:
            return max(
                int(self.result.params[i].name.split("_")[-1])
                for i in self.result.params
            )
        return None

    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Call the post-processing.

        The property accesses below run for their side effects; the order is
        significant (residuals need the report, contributions need residuals).
        """
        self.make_insight_report
        self.make_residual_fit
        self.make_fit_contributions
        self.export_correlation2args
        self.export_results2args
        return (self.df, self.args)

    def rename_columns(self, df: pd.DataFrame) -> pd.DataFrame:
        """Rename the columns of the dataframe.

        Rename the columns of the dataframe to the names defined in the input file.

        Args:
            df (pd.DataFrame): DataFrame containing the original input data, which are
                individually pre-named.

        Returns:
            pd.DataFrame: DataFrame containing renamed columns. All column-names are
                lowered. In case of a regular fitting, the columns are named `energy`
                and `intensity`. In case of a global fitting, `energy` stays `energy`
                and `intensity` is extended by a `_` and column index; like: `energy`
                and `intensity_1`, `intensity_2`, `intensity_...` depending on
                the dataset size.
        """
        if self.args["global"]:
            return df.rename(
                columns={
                    col: "energy" if i == 0 else f"intensity_{i}"
                    for i, col in enumerate(df.columns)
                }
            )
        return df.rename(columns={df.columns[0]: "energy", df.columns[1]: "intensity"})

    @property
    def make_insight_report(self) -> None:
        """Make an insight-report of the fit statistic.

        !!! note "About Insight Report"

            The insight report based on:

            1. Configurations
            2. Statistics
            3. Variables
            4. Errorbars
            5. Correlations
            6. Covariance Matrix
            7. _Optional_: Confidence Interval

            All of the above are included in the report as dictionary in `args`.
        """
        self.args["fit_insights"] = fit_report_as_dict(
            self.result, modelpars=self.result.params
        )
        if self.args["conf_interval"]:
            try:
                self.args["confidence_interval"] = conf_interval(
                    self.minimizer, self.result, **self.args["conf_interval"]
                )
            except MinimizerException as exc:
                # Best-effort: a failed CI calculation must not abort the run.
                print(f"Error: {exc} -> No confidence interval could be calculated!")
                self.args["confidence_interval"] = None

    @property
    def make_residual_fit(self) -> None:
        r"""Make the residuals of the model and the fit.

        !!! note "About Residual and Fit"

            The residual is calculated by the difference of the best fit `model` and
            the reference `data`. In case of a global fitting, the residuals are
            calculated for each `spectra` separately plus an averaged global residual.

            $$
            \mathrm{residual} = \mathrm{model} - \mathrm{data}
            $$

            $$
            \mathrm{residual}_{i} = \mathrm{model}_{i} - \mathrm{data}_{i}
            $$

            $$
            \mathrm{residual}_{avg} = \frac{ \sum_{i}
            \mathrm{model}_{i} - \mathrm{data}_{i}}{i}
            $$

            The fit is defined by the difference sum of fit and reference data. In case
            of a global fitting, the residuals are calculated for each `spectra`
            separately.
        """
        _df = self.df.copy()
        if self.args["global"]:
            # lmfit flattens the global residual; reshape back to one row per
            # spectrum (data_size columns per point, then transpose).
            residual = self.result.residual.reshape((-1, self.data_size)).T
            for i, res in enumerate(residual, start=1):
                _df[f"residual_{i}"] = res
                _df[f"fit_{i}"] = self.df[f"intensity_{i}"].values + res
            _df["residual_avg"] = np.mean(residual, axis=0)
        else:
            residual = self.result.residual
            _df["residual"] = residual
            _df["fit"] = self.df["intensity"].values + residual
        self.df = _df

    @property
    def make_fit_contributions(self) -> None:
        """Make the fit contributions of the best fit model.

        !!! info "About Fit Contributions"

            The fit contributions are made independently of the local or global fitting.
        """
        self.df = calculated_model(
            params=self.result.params,
            x=self.df.iloc[:, 0].values,
            df=self.df,
            global_fit=self.args["global"],
        )

    @property
    def export_correlation2args(self) -> None:
        """Export the correlation matrix to the input file arguments.

        !!! note "About Correlation Matrix"

            The correlation matrix is calculated from and for the pandas dataframe and
            divided into two parts:

            1. Linear correlation matrix
            2. Non-linear correlation matrix (coming later ...)
        """
        # NOTE(review): orient="list" discards the row labels of the square
        # correlation matrix; consumers that rebuild a DataFrame from this dict
        # must restore the index themselves.
        self.args["linear_correlation"] = self.df.corr().to_dict(orient="list")

    @property
    def export_results2args(self) -> None:
        """Export the results of the fit to the input file arguments."""
        self.args["fit_result"] = self.df.to_dict(orient="list")
class SaveResult:
    """Saving the result of the fitting process."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize SaveResult class.

        !!! note "About SaveResult"

            The SaveResult class is responsible for saving the results of the
            optimization process. The results are saved in the following formats:

            1. JSON (default) for all results and meta data of the fitting process.
            2. CSV for the results of the optimization process.

        !!! note "About the output `CSV`-file"

            The output files are separated into three classes:

            1. The `results` of the optimization process
               (`<outfile>_fit.csv`): energy axis, intensity values, best fit,
               residuum, and the single model contributions.
            2. The `correlation analysis` of the optimization process
               (`<outfile>_correlation.csv`): the linear correlation matrix of
               all exported attributes (energy, intensity/intensities, residuum,
               best fit, model contributions).
            3. The `error analysis` of the optimization process
               (`<outfile>_errors.csv`): initial, current, and best model values
               plus the residuum/error relative to the best and absolute fit.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it will
                be extended by the single contribution of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.df = df
        self.args = args

    def __call__(self) -> None:
        """Call the SaveResult class.

        JSON is written first so a missing `outfile` raises before any CSV
        file is created.
        """
        self.save_as_json
        self.save_as_csv

    @property
    def save_as_csv(self) -> None:
        """Save the fit results to csv files.

        !!! note "About saving the fit results"

            The fit results are saved to csv files and are divided into three different
            categories:

            1. The `results` of the optimization process.
            2. The `correlation analysis` of the optimization process.
            3. The `error analysis` of the optimization process.
        """
        self.df.to_csv(Path(f"{self.args['outfile']}_fit.csv"), index=False)
        corr = pd.DataFrame.from_dict(self.args["linear_correlation"])
        # ``to_dict(orient="list")`` dropped the row labels, so the rebuilt
        # frame carries a plain RangeIndex. The matrix is square with identical
        # row/column order, so restore the attribute names from the columns;
        # otherwise the "attributes" column contains 0, 1, 2, ... instead of
        # the attribute names (reported export bug).
        corr.index = corr.columns
        corr.to_csv(
            Path(f"{self.args['outfile']}_correlation.csv"),
            index=True,
            index_label="attributes",
        )
        pd.DataFrame.from_dict(self.args["fit_insights"]["variables"]).to_csv(
            Path(f"{self.args['outfile']}_errors.csv"),
            index=True,
            index_label="attributes",
        )

    @property
    def save_as_json(self) -> None:
        """Save the fitting result as json file.

        Raises:
            FileNotFoundError: If no output file stem is provided in `args`.
        """
        if self.args["outfile"]:
            with open(
                Path(f"{self.args['outfile']}_summary.json"), "w", encoding="utf8"
            ) as f:
                json.dump(self.args, f, indent=4)
        else:
            raise FileNotFoundError("No output file provided!")
def read_input_file(fname: Path) -> MutableMapping[str, Any]:
    """Read the input file.

    Read the input file as `toml`, `json`, or `yaml` files and return as a dictionary.

    Args:
        fname (str): Name of the input file.

    Raises:
        OSError: If the input file is not supported.

    Returns:
        dict: Return the input file arguments as a dictionary with additional
            information beyond the command line arguments.
    """
    fname = Path(fname)
    suffix = fname.suffix
    if suffix == ".toml":
        return toml.load(fname)
    if suffix == ".json":
        with open(fname, "r", encoding="utf8") as f:
            return json.load(f)
    if suffix in (".yaml", ".yml"):
        with open(fname, "r", encoding="utf8") as f:
            return yaml.load(f, Loader=yaml.FullLoader)
    raise OSError(
        f"ERROR: Input file {fname} has not supported file format.\n"
        "Supported fileformats are: '*.json', '*.yaml', and '*.toml'"
    )
def load_data(args: Dict[str, str]) -> pd.DataFrame:
    """Load the data from a txt file.

    !!! note "About the data format"

        Load data from a txt file, which can be an ASCII file as txt, csv, or
        user-specific but rational file. The file can be separated by a delimiter.
        In case of 2d data, the columns has to be defined. In case of 3D data, all
        columns are considered as data.

    Args:
        args (Dict[str,str]): The input file arguments as a dictionary with additional
            information beyond the command line arguments.

    Returns:
        pd.DataFrame: DataFrame containing the input data (`x` and `data`),
            as well as the best fit and the corresponding residuum. Hence, it will be
            extended by the single contribution of the model.
    """
    read_kwargs = {
        "sep": args["separator"],
        "header": args["header"],
        "dtype": np.float64,
        "decimal": args["decimal"],
        "comment": args["comment"],
    }
    if not args["global"]:
        # Regular (2d) fit: restrict the read to the requested columns.
        read_kwargs["usecols"] = args["column"]
    try:
        return pd.read_csv(args["infile"], **read_kwargs)
    except ValueError as exc:
        print(f"Error: {exc} -> Dataframe contains non numeric data!")
        sys.exit(1)
def check_keywords_consistency(
    check_args: MutableMapping[str, Any], ref_args: Dict[str, Any]
) -> None:
    """Check if the keywords are consistent.

    Check if the keywords are consistent between two dictionaries. The two dictionaries
    are reference keywords of the `cmd_line_args` and the `args` of the `input_file`.

    Args:
        check_args (MutableMapping[str, Any]): First dictionary to be checked.
        ref_args (Dict[str,Any]): Second dictionary to be checked.

    Raises:
        KeyError: If the keywords are not consistent.
    """
    unknown = [keyword for keyword in check_args if keyword not in ref_args]
    if unknown:
        # Report the first offending keyword, matching insertion order.
        raise KeyError(f"ERROR: The {unknown[0]} is not parameter of the `cmd-input`!")
<|code_end|>
|
[Bug]: Correct Export of correlation as `*.csv`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
```text
0,energy,energy,"[1.0, 0,...
```
### Expected Behavior
_No response_
### Steps To Reproduce
_No response_
### ⚙️ Environment
```markdown
- OS:
- Python: 3.10.2
- spectrafit: 0.16.1
```
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "0.16.1"  # Package version string.
<|code_end|>
spectrafit/models.py
<|code_start|>"""Minimization models for curve fitting."""
from collections import defaultdict
from dataclasses import dataclass
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from lmfit import Minimizer
from lmfit import Parameters
from numpy.typing import NDArray
from scipy.signal import find_peaks
from scipy.special import erf
from scipy.special import wofz
from scipy.stats import hmean
from spectrafit.api.tools_model import AutopeakAPI
from spectrafit.api.tools_model import GlobalFittingAPI
from spectrafit.api.tools_model import SolverModelsAPI
class DistributionModels:
    """Distribution models for the fit.

    !!! note "About distribution models"

        `DistributionModels` are wrapper functions for the distribution models. The
        overall goal is to extract from the best parameters the single contributions in
        the model. The superposition of the single contributions is the final model.

    !!! note "About the cumulative distribution"

        The cumulative distribution is the sum of the single contributions. The
        cumulative distribution is the model that is fitted to the data. In contrast to
        the single contributions, the cumulative distribution is not normalized and
        therefore the amplitude of the single contributions is not directly comparable
        to the amplitude of the cumulative distribution. Also, the cumulative
        distributions are consequently using the `fwhm` parameter instead of the
        `sigma` parameter.

    NOTE(review): none of the methods below reads instance state; they are kept
    as regular methods because `ReferenceKeys.__models__` enumerates the public
    callables of this class.
    """

    def gaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Gaussian distribution.

        $$
        {\displaystyle g(x)={\frac {1}{\sigma {\sqrt {2\pi }}}}\exp
        ( -{\frac {1}{2}}{\frac {(x-\mu )^{2}}{\sigma ^{2}}} ) }
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Gaussian distribution.
                Defaults to 1.0.
            center (float, optional): Center of the Gaussian distribution.
                Defaults to 0.0.
            fwhmg (float, optional): Full width at half maximum (FWHM) of the Gaussian
                distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Gaussian distribution of `x` given.
        """
        # FWHM -> sigma via 2*sqrt(2*ln 2).
        sigma = fwhmg / Constants.sig2fwhm
        return np.array(amplitude / (Constants.sq2pi * sigma)) * np.exp(
            -((1.0 * x - center) ** 2) / (2 * sigma**2)
        )

    def lorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Lorentzian distribution.

        $$
        f(x;x_{0},\gamma )={\frac {1}{\pi \gamma
        [ 1+ ( {\frac {x-x_{0}}{\gamma }})^{2} ]
        }} ={1 \over \pi \gamma } [ {\gamma ^{2} \over (x-x_{0})^{2}+\gamma ^{2}} ]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Lorentzian distribution.
                Defaults to 1.0.
            center (float, optional): Center of the Lorentzian distribution. Defaults to
                0.0.
            fwhml (float, optional): Full width at half maximum (FWHM) of the Lorentzian
                distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Lorentzian distribution of `x` given.
        """
        # FWHM of a Lorentzian is 2*gamma.
        sigma = fwhml / 2.0
        return np.array(amplitude / (1 + ((1.0 * x - center) / sigma) ** 2)) / (
            np.pi * sigma
        )

    def voigt(
        self,
        x: NDArray[np.float64],
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: Optional[float] = None,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Voigt distribution.

        $$
        {\displaystyle V(x;\sigma ,\gamma )\equiv
        \int_{-\infty }^{\infty }G(x';\sigma )
        L(x-x';\gamma )\,dx'}
        $$

        NOTE(review): unlike the other distributions, this one takes no
        `amplitude` parameter.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float, optional): Center of the Voigt distribution. Defaults to 0.0.
            fwhmv (float, optional): Full width at half maximum (FWHM) of the Lorentzian
                distribution. Defaults to 1.0.
            gamma (float, optional): Scaling factor of the complex part of the
                [Faddeeva Function](https://en.wikipedia.org/wiki/Faddeeva_function).
                Defaults to None.

        Returns:
            NDArray[np.float64]: Voigt distribution of `x` given.
        """
        # 3.60131 approximates the Voigt FWHM-to-sigma conversion factor.
        sigma = fwhmv / 3.60131
        if gamma is None:
            gamma = sigma
        z = (x - center + 1j * gamma) / (sigma * Constants.sq2)
        return np.array(wofz(z).real / (sigma * Constants.sq2pi))

    def pseudovoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional Pseudo-Voigt distribution.

        !!! note "See also:"

            J. Appl. Cryst. (2000). 33, 1311-1316
            https://doi.org/10.1107/S0021889800010219

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Pseudo-Voigt distribution.
                Defaults to 1.0.
            center (float, optional): Center of the Pseudo-Voigt distribution.
                Defaults to 0.0.
            fwhmg (float, optional): Full width half maximum of the Gaussian
                distribution in the Pseudo-Voigt distribution. Defaults to 1.0.
            fwhml (float, optional): Full width half maximum of the Lorentzian
                distribution in the Pseudo-Voigt distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Pseudo-Voigt distribution of `x` given.
        """
        # Total FWHM approximation (polynomial mixing of fwhmg and fwhml).
        f = np.power(
            fwhmg**5
            + 2.69269 * fwhmg**4 * fwhml
            + 2.42843 * fwhmg**3 * fwhml**2
            + 4.47163 * fwhmg**2 * fwhml**3
            + 0.07842 * fwhmg * fwhml**4
            + fwhml**5,
            0.2,
        )
        # Mixing fraction of the Lorentzian contribution.
        n = (
            1.36603 * (fwhml / f)
            - 0.47719 * (fwhml / f) ** 2
            + 0.11116 * (fwhml / f) ** 3
        )
        return np.array(
            n * self.lorentzian(x=x, amplitude=amplitude, center=center, fwhml=fwhml)
            + (1 - n)
            * self.gaussian(x=x, amplitude=amplitude, center=center, fwhmg=fwhmg)
        )

    def exponential(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        decay: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional exponential decay.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the exponential function.
                Defaults to 1.0.
            decay (float, optional): Decay of the exponential function. Defaults to 1.0.
            intercept (float, optional): Intercept of the exponential function.
                Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Exponential decay of `x` given.
        """
        return np.array(amplitude * np.exp(-x / decay) + intercept)

    def power(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        exponent: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional power function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the power function. Defaults to
                1.0.
            exponent (float, optional): Exponent of the power function. Defaults to 1.0.
            intercept (float, optional): Intercept of the power function. Defaults to
                0.0.

        Returns:
            NDArray[np.float64]: power function of `x` given.
        """
        return np.array(amplitude * np.power(x, exponent) + intercept)

    def linear(
        self,
        x: NDArray[np.float64],
        slope: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional linear function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            slope (float, optional): Slope of the linear function. Defaults to 1.0.
            intercept (float, optional): Intercept of the linear function.
                Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Linear function of `x` given.
        """
        return np.array(slope * x + intercept)

    def constant(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional constant value.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the constant. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Constant value of `x` given.
        """
        return np.array(np.linspace(amplitude, amplitude, len(x)))

    @staticmethod
    def _norm(
        x: NDArray[np.float64], center: float, sigma: float
    ) -> NDArray[np.float64]:
        """Normalize the data for step functions.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float): Center of the step function.
            sigma (float): Sigma of the step function.

        Returns:
            NDArray[np.float64]: Normalized data.
        """
        # Guard against division by (near-)zero sigma.
        if abs(sigma) < 1.0e-13:
            sigma = 1.0e-13
        return np.subtract(x, center) / sigma

    def erf(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional error function.

        $$
        f(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the error function.
                Defaults to 1.0.
            center (float, optional): Center of the error function. Defaults to 0.0.
            sigma (float, optional): Sigma of the error function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Error function of `x` given.
        """
        return np.array(amplitude * 0.5 * (1 + erf(self._norm(x, center, sigma))))

    def heaviside(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Heaviside step function.

        $$
        f(x) = \begin{cases}
        0 & x < 0 \\
        0.5 & x = 0 \\
        1 & x > 0
        \end{cases}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Heaviside step function.
                Defaults to 1.0.
            center (float, optional): Center of the Heaviside step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the Heaviside step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Heaviside step function of `x` given.
        """
        return np.array(amplitude * 0.5 * (1 + np.sign(self._norm(x, center, sigma))))

    def atan(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional arctan step function.

        $$
        f(x) = \frac{1}{\pi} \arctan(\frac{x - c}{s})
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the arctan step function.
                Defaults to 1.0.
            center (float, optional): Center of the arctan step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the arctan step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Arctan step function of `x` given.
        """
        return np.array(
            amplitude * 0.5 * (1 + np.arctan(self._norm(x, center, sigma)) / np.pi)
        )

    def log(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional logarithmic step function.

        $$
        f(x) = A \, \frac{1}{2} [ 1 +
        \frac{\log(\frac{x - c}{s})}{\pi} ]
        $$

        NOTE(review): the implementation applies `np.log` to the normalized
        argument, so values with `x <= center` yield `-inf`/`nan`; this is not
        the logistic sigmoid the previous docstring suggested.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the logarithmic step function.
                Defaults to 1.0.
            center (float, optional): Center of the logarithmic step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the logarithmic step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Logarithmic step function of `x` given.
        """
        return np.array(
            amplitude * 0.5 * (1 + np.log(self._norm(x, center, sigma)) / np.pi)
        )

    def cgaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Gaussian function.

        $$
        f(x) = \frac{1}{2} \left[1 + erf\left(\frac{x - c}{s \sqrt{2}}\right)\right]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Gaussian function. Defaults to
                1.0.
            center (float, optional): Center of the Gaussian function. Defaults to 0.0.
            fwhmg (float, optional): Full width at half maximum of the Gaussian
                function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Gaussian function of `x` given.
        """
        sigma = fwhmg / Constants.sig2fwhm
        return np.array(
            amplitude * 0.5 * (1 + erf((x - center) / (sigma * np.sqrt(2.0))))
        )

    def clorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Lorentzian function.

        $$
        f(x) = \frac{1}{\pi} \arctan\left(\frac{x - c}{s}\right) + \frac{1}{2}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Lorentzian function.
                Defaults to 1.0.
            center (float, optional): Center of the Lorentzian function.
                Defaults to 0.0.
            fwhml (float, optional): Full width at half maximum of the Lorentzian
                function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Lorentzian function of `x` given.
        """
        sigma = fwhml / 2.0
        return np.array(amplitude * (np.arctan((x - center) / sigma) / np.pi) + 0.5)

    def cvoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Voigt function.

        $$
        f(x) = \frac{1}{2} \left[1 + erf\left(\frac{x - c}{s \sqrt{2}}\right)\right]
        $$

        NOTE(review): the implementation multiplies the Gaussian CDF above by
        an additional factor exp(-((x - c)/gamma)^2); confirm this matches the
        intended cumulative Voigt approximation.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Voigt function. Defaults to
                1.0.
            center (float, optional): Center of the Voigt function. Defaults to 0.0.
            fwhmv (float, optional): Full width at half maximum of the Voigt function.
                Defaults to 1.0.
            gamma (float, optional): Gamma of the Voigt function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Voigt function of `x` given.
        """
        sigma = fwhmv / 3.60131
        return np.array(
            amplitude
            * 0.5
            * (1 + erf((x - center) / (sigma * np.sqrt(2.0))))
            * np.exp(-(((x - center) / gamma) ** 2))
        )
@dataclass(frozen=True)
class ReferenceKeys:
    """Reference keys for model fitting and peak detection."""

    # All public callables of ``DistributionModels`` count as supported models.
    __models__ = [
        func
        for func in dir(DistributionModels)
        if callable(getattr(DistributionModels, func)) and not func.startswith("_")
    ]
    # Subset of models that the automatic peak detection may assign.
    __automodels__ = [
        "gaussian",
        "lorentzian",
        "voigt",
        "pseudovoigt",
    ]

    def model_check(self, model: str) -> None:
        """Check if model is available.

        Args:
            model (str): Model name.

        Raises:
            NotImplementedError: If the model is not supported.
        """
        # Only the prefix before the first underscore identifies the model
        # (names arrive as e.g. ``gaussian_1``).
        if model.split("_")[0] not in self.__models__:
            raise NotImplementedError(f"{model} is not supported!")

    def automodel_check(self, model: str) -> None:
        """Check if model is available.

        Args:
            model (str): Auto Model name (gaussian, lorentzian, voigt, or pseudovoigt).

        Raises:
            KeyError: If the model is not supported.
        """
        if model not in self.__automodels__:
            raise KeyError(f"{model} is not supported!")

    def detection_check(self, args: Dict[str, Any]) -> None:
        """Check if detection is available.

        Args:
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Raises:
            KeyError: If the key is not parameter of the `scipy.signal.find_peaks`
                function. This will be checked via `pydantic` in `spectrafit.api`.
        """
        # Validation is delegated to the pydantic model; it raises on unknown keys.
        AutopeakAPI(**args)
@dataclass(frozen=True)
class Constants:
    r"""Mathematical constants for the curve models.

    !!! info "Constants"

        $$
        log2 = \log{2 }
        $$

        $$
        sq2pi = \sqrt{2 \pi}
        $$

        $$
        sqpi = \sqrt{ \pi}
        $$

        $$
        sq2 = \sqrt{2 }
        $$

        $$
        sig2fwhm = 2 \sqrt{2\log{2 }}
        $$
    """

    # ln(2)
    log2 = np.log(2.0)
    # sqrt(2*pi), Gaussian normalization factor
    sq2pi = np.sqrt(2.0 * np.pi)
    # sqrt(pi)
    sqpi = np.sqrt(np.pi)
    # sqrt(2)
    sq2 = np.sqrt(2.0)
    # sigma-to-FWHM conversion factor for a Gaussian: 2*sqrt(2*ln 2)
    sig2fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))
class AutoPeakDetection:
    """Automatic detection of peaks in a spectrum.

    Thin wrapper around `scipy.signal.find_peaks`: every keyword of
    `find_peaks` is either taken from the user-supplied ``autopeak`` settings
    or estimated from the data itself via the ``estimate_*`` properties below.
    """

    def __init__(
        self,
        x: NDArray[np.float64],
        data: NDArray[np.float64],
        args: Dict[str, Any],
    ) -> None:
        """Initialize the AutoPeakDetection class.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 1d-array.
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments. Only
                the ``"autopeak"`` entry is consumed here; it may be a `bool`
                (use estimated defaults) or a `dict` of explicit
                `scipy.signal.find_peaks` keywords.
        """
        self.x = x
        self.data = data
        self._args = args["autopeak"]

    @staticmethod
    def check_key_exists(
        key: str, args: Dict[str, Any], value: Union[float, Tuple[Any, Any]]
    ) -> Any:
        """Check if a key exists in a dictionary.

        Please check for the reference key also [scipy.signal.find_peaks][1].

        [1]:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html

        Args:
            key (str): Reference key of `scipy.signal.find_peaks`.
            args (Dict[str, Any]): Reference values of `scipy.signal.find_peaks`,
                if not defined will be set to estimated default values.
            value (Union[float, Tuple[float,float]]): Default value for the
                reference key.

        Returns:
            Any: The reference value for `scipy.signal.find_peaks`.
        """
        return args.get(key, value)

    @property
    def estimate_height(self) -> Tuple[float, float]:
        r"""Estimate the initial height based on an inverse noise ratio of a signal.

        !!! info "About the estimation of the height"

            The lower end of the height is the inverse noise ratio of the
            `data`, and the upper limit is the maximum value of the `data`. The
            noise ratio of the `data` is based on the original implementation
            by `SciPy`:

            ```python
            def signaltonoise(a, axis=0, ddof=0):
                a = np.asanyarray(a)
                m = a.mean(axis)
                sd = a.std(axis=axis, ddof=ddof)
                return np.where(sd == 0, 0, m / sd)
            ```

        Returns:
            Tuple[float, float]: Tuple of the inverse signal to noise ratio and
                the maximum value of the `data`.
        """
        return 1 - self.data.mean() / self.data.std(), self.data.max()

    @property
    def estimate_threshold(self) -> Tuple[float, float]:
        """Estimate the threshold value for the peak detection.

        Returns:
            Tuple[float, float]: Minimum and maximum value of the spectrum
                `data`, respectively, `intensity`.
        """
        return self.data.min(), self.data.max()

    @property
    def estimate_distance(self) -> float:
        """Estimate the initial distance between peaks.

        Returns:
            float: Estimated distance between peaks (the smallest `x`-step,
                clamped to at least 1 as required by `find_peaks`).
        """
        min_step = np.diff(self.x).min()
        return max(min_step, 1.0)

    @property
    def estimate_prominence(self) -> Tuple[float, float]:
        """Estimate the prominence of a peak.

        !!! info "About the estimation of the prominence"

            The prominence is the difference between the height of the peak and
            the bottom. To get an estimate of the prominence, the height of the
            peak is calculated by the maximum value of the `data` and the bottom
            is calculated by the harmonic mean of the `data`.

        Returns:
            Tuple[float, float]: Tuple of the harmonic-mean and maximum value
                of `data`.
        """
        try:
            return hmean(self.data), self.data.max()
        except ValueError as exc:
            # hmean rejects non-positive values; fall back to the arithmetic mean.
            print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
            return self.data.mean(), self.data.max()

    @property
    def estimated_width(self) -> Tuple[float, float]:
        """Estimate the width of a peak.

        !!! info "About the estimation of the width"

            The width of a peak is estimated for a lower and an upper end. For
            the lower end, the minimum stepsize is used. For the upper end, the
            stepsize between the half maximum and the minimum value of the
            `data` is used as the width.

        Returns:
            Tuple[float, float]: Estimated width lower and upper end of the
                peaks.
        """
        return (
            np.diff(self.x).min(),
            np.abs(self.x[self.data.argmax()] - self.x[self.data.argmin()]) / 2,
        )

    @property
    def estimated_rel_height(self) -> float:
        """Estimate the relative height of a peak.

        !!! info "About the estimation of the relative height"

            The relative height of a peak is approximated by the difference of
            the harmonic mean value of the `data` and the minimum value of the
            `data` divided by the factor of `4`. In case of negative ratios,
            the value will be set to `Zero`.

        Returns:
            float: Estimated relative height of a peak.
        """
        try:
            rel_height = (hmean(self.data) - self.data.min()) / 4
        except ValueError as exc:
            print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
            rel_height = (self.data.mean() - self.data.min()) / 4
        return rel_height if rel_height > 0 else 0.0

    @property
    def estimated_wlen(self) -> float:
        r"""Estimate the window length for the peak detection.

        !!! info "About the estimation of the window length"

            The window length is the length of the window for the peak
            detection is defined to be 1% of the length of the `data`,
            consequently the len of the `data` is divided by 100. In case of a
            window length smaller than 1, the window length will be set to
            numerical value of 1, which is defined by `1 + 1e-9`.

        Returns:
            float: Estimated window length is set to the numeric value of > 1.
        """
        wlen = self.data.size / 100
        return wlen if wlen > 1.0 else 1 + 1e-9

    @property
    def estimated_plateau_size(self) -> Tuple[float, float]:
        """Estimate the plateau size for the peak detection.

        Returns:
            Tuple[float, float]: Estimated plateau size is set to `zero` for
                the lower end and the maximum value of the `x` for the upper
                end.
        """
        return 0.0, self.x.max()

    def initialize_peak_detection(self) -> None:
        """Initialize the peak detection.

        !!! note "Initialize the peak detection"

            This method is used to initialize the peak detection. The
            initialization can be activated by setting the `initialize`
            attribute to `True`, which will automatically estimate the default
            parameters for the peak detection. In case of the `initialize`
            attribute is defined as dictionary, the proposed values are taken
            from the dictionary if the corresponding key is present; otherwise
            the estimated defaults are used.

        Raise:
            TypeError: If the `initialize` attribute is not of type `bool` or
                `dict`.
        """
        if isinstance(self._args, bool):
            self.default_values()
        elif isinstance(self._args, dict):
            # Reject any key that is not a valid `find_peaks` keyword first.
            ReferenceKeys().detection_check(self._args)
            self.height = self.check_key_exists(
                key="height", args=self._args, value=self.estimate_height
            )
            self.threshold = self.check_key_exists(
                key="threshold", args=self._args, value=self.estimate_threshold
            )
            self.distance = self.check_key_exists(
                key="distance", args=self._args, value=self.estimate_distance
            )
            self.prominence = self.check_key_exists(
                key="prominence", args=self._args, value=self.estimate_prominence
            )
            self.width = self.check_key_exists(
                key="width", args=self._args, value=self.estimated_width
            )
            self.wlen = self.check_key_exists(
                key="wlen", args=self._args, value=self.estimated_wlen
            )
            self.rel_height = self.check_key_exists(
                key="rel_height", args=self._args, value=self.estimated_rel_height
            )
            self.plateau_size = self.check_key_exists(
                key="plateau_size", args=self._args, value=0.0
            )
        else:
            raise TypeError(
                f"The type of the `args` is not supported: {type(self._args)}"
            )

    def default_values(self) -> None:
        """Set the default values for the peak detection."""
        self.height = self.estimate_height
        self.threshold = self.estimate_threshold
        self.distance = self.estimate_distance
        self.prominence = self.estimate_prominence
        self.width = self.estimated_width
        self.wlen = self.estimated_wlen
        self.rel_height = self.estimated_rel_height
        # NOTE(review): int `0` here vs float `0.0` in the dict branch of
        # `initialize_peak_detection` — behaviorally equivalent for find_peaks,
        # but consider unifying.
        self.plateau_size = 0

    def __autodetect__(self) -> Any:
        """Return peak positions and properties from `scipy.signal.find_peaks`."""
        return find_peaks(
            self.data,
            height=self.height,
            threshold=self.threshold,
            distance=self.distance,
            prominence=self.prominence,
            width=self.width,
            wlen=self.wlen,
            rel_height=self.rel_height,
            plateau_size=self.plateau_size,
        )
class ModelParameters(AutoPeakDetection):
    """Class to define the model parameters.

    Parameters can come from three sources: an explicit ``peaks`` section
    (local fit), an explicit or derived per-spectrum definition (global fit),
    or the automatic peak detection inherited from `AutoPeakDetection`.
    """

    # Per auto-model FWHM parameter spec: (name-suffix, value-scale, max-scale)
    # applied to the detected peak width. Pseudo-Voigt carries two FWHM terms.
    _FWHM_SPECS: Dict[str, Tuple[Tuple[str, float, float], ...]] = {
        "gaussian": (("fwhmg", 1.0, 2.0),),
        "lorentzian": (("fwhml", 1.0, 2.0),),
        "voigt": (("fwhmv", 1.0, 2.0),),
        "pseudovoigt": (("fwhmg", 0.5, 1.0), ("fwhml", 0.5, 2.0)),
    }

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the model parameters.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`).
            args (Dict[str, Any]):
                Nested arguments dictionary for the model based on **one** or
                **two** `int` keys depending if global fitting parameters, will
                explicit defined or not.

        !!! note "About `args` for models"

            The `args` dictionary is used to define the model parameters. And
            the total nested dictionary structure is as follows:

            ```python
            args: Dict[str, Dict[int, Dict[str, Dict[str, Union[str, int, float]]]]]
            ```

        !!! info "About the fitting options"

            In general, there are two option for the fitting possible:

            1. `Classic fitting` or `local fitting`, where the parameters are
               defined for a 2D spectrum.
            2. `Global fitting`, where the parameters are defined for a 3D
               spectrum. Here, the parameters can be automatically defined for
               each column on the basis of the initial parameters or they can
               be completely defined by the user. The `global fitting`
               definition starts at `1` similar to the peaks attributes
               notation.
        """
        # Number of intensity columns; the first column is the energy axis.
        self.col_len = df.shape[1] - 1
        self.args = args
        self.params = Parameters()
        self.x, self.data = self.df_to_numvalues(df=df, args=args)
        super().__init__(self.x, self.data, self.args)

    def df_to_numvalues(
        self, df: pd.DataFrame, args: Dict[str, Any]
    ) -> Tuple[NDArray[np.float64], NDArray[np.float64]]:
        """Transform the dataframe to numeric values of `x` and `data`.

        !!! note "About the dataframe to numeric values"

            The transformation is done by the `value` property of pandas. The
            dataframe is separated into the `x` and `data` columns and the `x`
            column is transformed to the energy values and the `data` column is
            transformed to the intensity values depending on the `args`
            dictionary. In terms of global fitting, the `data` contains the
            intensity values for each column.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary
                with additional information beyond the command line arguments.

        Returns:
            Tuple[NDArray[np.float64], NDArray[np.float64]]: Tuple of `x` and
                `data` as numpy arrays (2D `data` for global fitting).
        """
        if args["global_"]:
            return (
                df[args["column"][0]].to_numpy(),
                df.loc[:, df.columns != args["column"][0]].to_numpy(),
            )
        return (df[args["column"][0]].to_numpy(), df[args["column"][1]].to_numpy())

    @property
    def return_params(self) -> Parameters:
        """Return the `class` representation of the model parameters.

        Returns:
            Parameters: Model parameters class.
        """
        self.__perform__()
        return self.params

    def __str__(self) -> str:
        """Return the `string` representation of the model parameters.

        Returns:
            str: String representation of the model parameters.
        """
        self.__perform__()
        return str(self.params)

    def __perform__(self) -> None:
        """Perform the model parameter definition.

        Dispatches on the combination of ``global_`` mode and automatic peak
        detection.

        Raises:
            KeyError: Global fitting in combination with automatic peak
                detection is not implemented yet.
        """
        if self.args["global_"] == 0 and not self.args["autopeak"]:
            self.define_parameters()
        elif self.args["global_"] == 1 and not self.args["autopeak"]:
            self.define_parameters_global()
        elif self.args["global_"] == 2 and not self.args["autopeak"]:
            self.define_parameters_global_pre()
        elif self.args["global_"] == 0:
            self.initialize_peak_detection()
            self.define_parameters_auto()
        elif self.args["global_"] in [1, 2]:
            raise KeyError(
                "Global fitting mode with automatic peak detection "
                "is not supported yet."
            )

    def define_parameters_auto(self) -> None:
        """Auto define the model parameters for local fitting.

        Runs the inherited peak detection and, for every detected peak, adds
        an amplitude, a center and the model-specific FWHM parameter(s). The
        amplitude and center bounds are identical for all models; only the
        FWHM parameters differ and are taken from `_FWHM_SPECS`.
        """
        positions, properties = self.__autodetect__()

        # The model type may be pinned in the `autopeak` settings; otherwise a
        # Gaussian profile is assumed.
        if (
            not isinstance(self.args["autopeak"], bool)
            and "model_type" in self.args["autopeak"]
        ):
            _model = self.args["autopeak"]["model_type"].lower()
            ReferenceKeys().automodel_check(model=_model)
            models = _model
        else:
            models = "gaussian"

        for i, (_cent, _amp, _fwhm) in enumerate(
            zip(
                self.x[positions],
                properties["peak_heights"],
                properties["widths"],
            ),
            start=1,
        ):
            self.params.add(
                f"{models}_amplitude_{i}",
                value=_amp,
                min=-np.abs(1.25 * _amp),
                max=np.abs(1.25 * _amp),
                vary=True,
            )
            self.params.add(
                f"{models}_center_{i}",
                value=_cent,
                min=0.5 * _cent,
                max=2 * _cent,
                vary=True,
            )
            for suffix, val_scale, max_scale in self._FWHM_SPECS[models]:
                self.params.add(
                    f"{models}_{suffix}_{i}",
                    value=val_scale * _fwhm,
                    min=0,
                    max=max_scale * _fwhm,
                    vary=True,
                )

        # Persist the generated parameter set so it appears in the output file.
        self.args["auto_generated_models"] = {
            "models": {
                key: {
                    "value": self.params[key].value,
                    "min": self.params[key].min,
                    "max": self.params[key].max,
                    "vary": self.params[key].vary,
                }
                for key in self.params
            },
            "positions": positions.tolist(),
            "properties": {key: value.tolist() for key, value in properties.items()},
        }

    def define_parameters(self) -> None:
        """Define the input parameters for a `params`-dictionary for classic fitting."""
        for key_1, value_1 in self.args["peaks"].items():
            self.define_parameters_loop(key_1=key_1, value_1=value_1)

    def define_parameters_loop(self, key_1: str, value_1: Dict[str, Any]) -> None:
        """Loop through the input parameters for a `params`-dictionary.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            value_1 (Dict[str, Any]): The value of the first level of the input
                dictionary.
        """
        for key_2, value_2 in value_1.items():
            self.define_parameters_loop_2(key_1=key_1, key_2=key_2, value_2=value_2)

    def define_parameters_loop_2(
        self, key_1: str, key_2: str, value_2: Dict[str, Any]
    ) -> None:
        """Loop through the input parameters for a `params`-dictionary.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            value_2 (Dict[str, Any]): The value of the second level of the
                input dictionary.
        """
        for key_3, value_3 in value_2.items():
            self.define_parameters_loop_3(
                key_1=key_1, key_2=key_2, key_3=key_3, value_3=value_3
            )

    def define_parameters_loop_3(
        self, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
    ) -> None:
        """Add a single parameter, named `<model>_<attribute>_<peak-id>`.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            key_3 (str): The key of the third level of the input dictionary.
            value_3 (Dict[str, Any]): The value of the third level of the input
                dictionary.
        """
        self.params.add(f"{key_2}_{key_3}_{key_1}", **value_3)

    def define_parameters_global(self) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting."""
        for col_i in range(self.col_len):
            for key_1, value_1 in self.args["peaks"].items():
                for key_2, value_2 in value_1.items():
                    for key_3, value_3 in value_2.items():
                        self._define_parameter(
                            col_i=col_i,
                            key_1=key_1,
                            key_2=key_2,
                            key_3=key_3,
                            value_3=value_3,
                        )

    def _define_parameter(
        self, col_i: int, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
    ) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting.

        For spectra after the first one, every attribute except the amplitude
        is tied via an `expr` to the corresponding parameter of spectrum 1, so
        only the amplitudes vary independently.

        Args:
            col_i (int): The column index.
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            key_3 (str): The key of the third level of the input dictionary.
            value_3 (Dict[str, Any]): The value of the third level of the input
                dictionary.
        """
        if col_i:
            if key_3 != "amplitude":
                self.params.add(
                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
                    expr=f"{key_2}_{key_3}_{key_1}_1",
                )
            else:
                self.params.add(
                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
                    **value_3,
                )
        else:
            self.params.add(f"{key_2}_{key_3}_{key_1}_1", **value_3)

    def define_parameters_global_pre(self) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting.

        !!! warning "About `params` for global fitting"

            `define_parameters_global_pre` requires fully defined
            `params`-dictionary in the json, toml, or yaml file input. This
            means:

            1. Number of the spectra must be defined.
            2. Number of the peaks must be defined.
            3. Number of the parameters must be defined.
            4. The parameters must be defined.
        """
        for key_1, value_1 in self.args["peaks"].items():
            for key_2, value_2 in value_1.items():
                for key_3, value_3 in value_2.items():
                    for key_4, value_4 in value_3.items():
                        self.params.add(f"{key_3}_{key_4}_{key_2}_{key_1}", **value_4)
class SolverModels(ModelParameters):
    """Solving models for 2D and 3D data sets.

    !!! hint "Solver Modes"

        * `"2D"`: Solve 2D models via the classic `lmfit` function.
        * `"3D"`: Solve 3D models via global fit. For the `global-fitting`
          procedure, the `lmfit` function is used to solve the models with an
          extended set of parameters.
    """

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the solver modes.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary
                with additional information beyond the command line arguments.
        """
        super().__init__(df=df, args=args)
        # Validated minimizer/optimizer settings and global-fit switch.
        self.args_solver = SolverModelsAPI(**args).dict()
        self.args_global = GlobalFittingAPI(**args).dict()
        # Building `return_params` also runs the parameter definition.
        self.params = self.return_params

    def __call__(self) -> Tuple[Minimizer, Any]:
        """Solve the fitting model.

        Returns:
            Tuple[Minimizer, Any]: Minimizer class and the fitting results.
        """
        # Both branches differ only in the residual function handed to lmfit.
        if self.args_global["global_"]:
            minimizer = Minimizer(
                self.solve_global_fitting,
                params=self.params,
                fcn_args=(self.x, self.data),
                **self.args_solver["minimizer"],
            )
        else:
            minimizer = Minimizer(
                self.solve_local_fitting,
                params=self.params,
                fcn_args=(self.x, self.data),
                **self.args_solver["minimizer"],
            )
        return (minimizer, minimizer.minimize(**self.args_solver["optimizer"]))

    @staticmethod
    def solve_local_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Solving the fitting problem.

        !!! note "About implemented models"

            `solve_local_fitting` is a wrapper function for the calling the
            implemented models. Based on the `params` dictionary, the function
            calls the corresponding models and merge them to the general model
            with will be optimized by the `lmfit`-optimizer.
            Currently the following models are supported:

            - [Gaussian](https://en.wikipedia.org/wiki/Gaussian_function)
            - [Lorentzian](https://en.wikipedia.org/wiki/Cauchy_distribution)
              also known as Cauchy distribution
            - [Voigt](https://en.wikipedia.org/wiki/Voigt_profile)
            - [Pseudo Voigt][1]
            - Exponential
            - [power][2] (also known as Log-parabola or just power)
            - Linear
            - Constant
            - [Error Function](https://en.wikipedia.org/wiki/Error_function)
            - [Arcus Tangens][3]
            - Logarithmic

            [1]: https://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_approximation
            [2]: https://en.wikipedia.org/wiki/Power_law
            [3]: https://en.wikipedia.org/wiki/Inverse_trigonometric_functions

        Args:
            params (Dict[str, Parameters]): The best optimized parameters of
                the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 1d-array.

        Returns:
            NDArray[np.float64]: The residual (model minus data) to minimize.
        """
        val = np.zeros(x.shape)
        peak_kwargs: Dict[Tuple[str, str], Parameters] = defaultdict(dict)
        # Parameter names follow "<model>_<attribute>_<peak-id>"; group all
        # attributes of one (model, peak-id) pair into a kwargs dict.
        for model in params:
            model = model.lower()
            ReferenceKeys().model_check(model=model)
            c_name = model.split("_")
            peak_kwargs[(c_name[0], c_name[2])][c_name[1]] = params[model]
        for key, _kwarg in peak_kwargs.items():
            val += getattr(DistributionModels(), key[0])(x, **_kwarg)
        return val - data

    @staticmethod
    def solve_global_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Solving the fitting for global problem.

        !!! note "About implemented models"

            `solve_global_fitting` is the global solution of
            `solve_local_fitting` a wrapper function for the calling the
            implemented models. For the kind of supported models see
            `solve_local_fitting`.

        !!! note "About the global solution"

            The global solution is a solution for the problem, where the
            `x`-values is the energy, but the y-values are the intensities,
            which has to be fitted as one unit. For this reason, the residual
            is calculated as the difference between all the y-values and the
            global proposed solution. Later the residual has to be flattened to
            a 1-dimensional array and minimized by the `lmfit`-optimizer.

        Args:
            params (Dict[str, Parameters]): The best optimized parameters of
                the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 2D-array.

        Returns:
            NDArray[np.float64]: The flattened residual of all spectra.
        """
        val = np.zeros(data.shape)
        peak_kwargs: Dict[Tuple[str, str, str], Parameters] = defaultdict(dict)
        # Global names carry a 1-based spectrum index as a fourth component:
        # "<model>_<attribute>_<peak-id>_<spectrum>".
        for model in params:
            model = model.lower()
            ReferenceKeys().model_check(model=model)
            c_name = model.split("_")
            peak_kwargs[(c_name[0], c_name[2], c_name[3])][c_name[1]] = params[model]
        for key, _kwarg in peak_kwargs.items():
            i = int(key[2]) - 1  # spectrum index -> zero-based column
            val[:, i] += getattr(DistributionModels(), key[0])(x, **_kwarg)
        val -= data
        return val.flatten()
def calculated_model(
    params: Dict[str, Parameters],
    x: NDArray[np.float64],
    df: pd.DataFrame,
    global_fit: int,
) -> pd.DataFrame:
    r"""Calculate the single contributions of the models and add them to the dataframe.

    !!! note "About calculated models"

        `calculated_model` are also wrapper functions similar to `solve_model`.
        The overall goal is to extract from the best parameters the single
        contributions in the model. Currently, `lmfit` provides only a single
        model, so the best-fit.

    Args:
        params (Dict[str, Parameters]): The best optimized parameters of the fit.
        x (NDArray[np.float64]): `x`-values of the data.
        df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
            as well as the best fit and the corresponding residuum. Hence, it
            will be extended by the single contribution of the model.
        global_fit (int): If 1 or 2, the model is calculated for the global fit.

    Returns:
        pd.DataFrame: Extended dataframe containing the single contributions of
            the models.
    """
    grouped: Dict[Any, Parameters] = defaultdict(dict)
    for raw_name in params:
        name = raw_name.lower()
        ReferenceKeys().model_check(model=name)
        parts = name.split("_")
        # Group by (model, peak-id[, spectrum]); the attribute name is the key
        # inside each group.
        group_key = (
            (parts[0], parts[2], parts[3]) if global_fit else (parts[0], parts[2])
        )
        grouped[group_key][parts[1]] = params[name]
    result = df.copy()
    for group_key, attributes in grouped.items():
        column_name = "_".join(group_key)
        result[column_name] = getattr(DistributionModels(), group_key[0])(
            x, **attributes
        )
    return result
<|code_end|>
spectrafit/tools.py
<|code_start|>"""Collection of essential tools for running SpectraFit."""
import gzip
import json
import pickle
import sys
from pathlib import Path
from typing import Any
from typing import Dict
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tomli
import yaml
from lmfit import Minimizer
from lmfit import conf_interval
from lmfit.minimizer import MinimizerException
from spectrafit.api.tools_model import ColumnNamesAPI
from spectrafit.models import calculated_model
from spectrafit.report import RegressionMetrics
from spectrafit.report import fit_report_as_dict
class PreProcessing:
    """Summarized all pre-processing-filters together."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize PreProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`), as well as the best fit and the corresponding
                residuum. Hence, it will be extended by the single contribution
                of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.df = df
        self.args = args

    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Apply all pre-processing-filters.

        Returns:
            pd.DataFrame: DataFrame containing the input data (`x` and `data`),
                which are optionally:

                1. shrinked to a given range
                2. shifted
                3. linear oversampled
                4. smoothed
            Dict[str,Any]: Adding a descriptive statistics to the input
                dictionary.
        """
        df_copy: pd.DataFrame = self.df.copy()
        # Record descriptive statistics of the raw data before any filter runs.
        self.args["data_statistic"] = df_copy.describe(
            percentiles=np.arange(0.1, 1.0, 0.1)
        ).to_dict(orient="split")
        try:
            if isinstance(self.args["energy_start"], (int, float)) or isinstance(
                self.args["energy_stop"], (int, float)
            ):
                df_copy = self.energy_range(df_copy, self.args)
            if self.args["shift"]:
                df_copy = self.energy_shift(df_copy, self.args)
            if self.args["oversampling"]:
                df_copy = self.oversampling(df_copy, self.args)
            if self.args["smooth"]:
                df_copy = self.smooth_signal(df_copy, self.args)
        except KeyError as exc:
            # A misspelled column name makes every later step meaningless.
            print(f"KeyError: {exc} is not part of the dataframe!")
            sys.exit(1)
        return (df_copy, self.args)

    @staticmethod
    def energy_range(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Select the energy range for fitting.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`), as well as the best fit and the corresponding
                residuum. Hence, it will be extended by the single contribution
                of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are shrinked according to the energy
                range. If neither bound is numeric, the unmodified copy is
                returned.
        """
        energy_start: Union[int, float] = args["energy_start"]
        energy_stop: Union[int, float] = args["energy_stop"]
        df_copy: pd.DataFrame = df.copy()
        if isinstance(energy_start, (int, float)) and isinstance(
            energy_stop, (int, float)
        ):
            return df_copy.loc[
                (df[args["column"][0]] >= energy_start)
                & (df[args["column"][0]] <= energy_stop)
            ]
        elif isinstance(energy_start, (int, float)):
            return df_copy.loc[df[args["column"][0]] >= energy_start]
        elif isinstance(energy_stop, (int, float)):
            return df_copy.loc[df[args["column"][0]] <= energy_stop]
        # Fix: previously fell through and returned None implicitly when no
        # bound was numeric; return the untouched copy instead.
        return df_copy

    @staticmethod
    def energy_shift(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Shift the energy axis by a given value.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`), as well as the best fit and the corresponding
                residuum. Hence, it will be extended by the single contribution
                of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are energy-shifted by the given value.
        """
        df_copy: pd.DataFrame = df.copy()
        df_copy.loc[:, args["column"][0]] = (
            df[args["column"][0]].to_numpy() + args["shift"]
        )
        return df_copy

    @staticmethod
    def oversampling(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Oversampling the data to increase the resolution of the data.

        !!! note "About Oversampling"

            In this implementation of oversampling, the data is oversampled by
            the factor of 5. In case of data with only a few points, the
            increased resolution should allow to easier solve the optimization
            problem. The oversampling based on a simple linear regression.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`), as well as the best fit and the corresponding
                residuum. Hence, it will be extended by the single contribution
                of the model.
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are oversampled by the factor of 5.
        """
        x_values = np.linspace(
            df[args["column"][0]].min(),
            df[args["column"][0]].max(),
            5 * df.shape[0],
        )
        # Linear interpolation of the intensity onto the denser grid.
        y_values = np.interp(
            x_values,
            df[args["column"][0]].to_numpy(),
            df[args["column"][1]].to_numpy(),
        )
        return pd.DataFrame({args["column"][0]: x_values, args["column"][1]: y_values})

    @staticmethod
    def smooth_signal(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Smooth the intensity values.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`).
            args (Dict[str,Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                (`x` and `data`), which are smoothed by the given value.
        """
        # Moving-average (boxcar) smoothing with window size args["smooth"].
        box = np.ones(args["smooth"]) / args["smooth"]
        df_copy: pd.DataFrame = df.copy()
        df_copy.loc[:, args["column"][1]] = np.convolve(
            df[args["column"][1]].to_numpy(), box, mode="same"
        )
        return df_copy
class PostProcessing:
"""Post-processing of the dataframe."""
    def __init__(
        self, df: pd.DataFrame, args: Dict[str, Any], minimizer: Minimizer, result: Any
    ) -> None:
        """Initialize PostProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`), as well as the best fit and the corresponding
                residuum. Hence, it will be extended by the single contribution
                of the model.
            args (Dict[str, Any]): The input file arguments as a dictionary
                with additional information beyond the command line arguments.
            minimizer (Minimizer): The minimizer class.
            result (Any): The result of the minimization of the best fit.
        """
        self.args = args
        # Columns are normalized to energy/intensity names before anything else.
        self.df = self.rename_columns(df=df)
        self.minimizer = minimizer
        self.result = result
        # Number of spectra for a global fit, None for a regular fit.
        self.data_size = self.check_global_fitting()
    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Call the post-processing.

        The steps run in a fixed order because later steps read columns and
        `args` entries produced by earlier ones.

        Returns:
            Tuple[pd.DataFrame, Dict[str, Any]]: The extended dataframe and the
                enriched ``args`` dictionary.
        """
        self.make_insight_report()
        self.make_residual_fit()
        self.make_fit_contributions()
        self.export_correlation2args()
        self.export_results2args()
        self.export_regression_metrics2args()
        self.export_desprective_statistic2args()
        return (self.df, self.args)
def check_global_fitting(self) -> Optional[int]:
"""Check if the global fitting is performed.
!!! note "About Global Fitting"
In case of the global fitting, the data is extended by the single
contribution of the model.
Returns:
Optional[int]: The number of spectra of the global fitting.
"""
if self.args["global_"]:
return max(
int(self.result.params[i].name.split("_")[-1])
for i in self.result.params
)
return None
def rename_columns(self, df: pd.DataFrame) -> pd.DataFrame:
"""Rename the columns of the dataframe.
Rename the columns of the dataframe to the names defined in the input file.
Args:
df (pd.DataFrame): DataFrame containing the original input data, which are
individually pre-named.
Returns:
pd.DataFrame: DataFrame containing renamed columns. All column-names are
lowered. In case of a regular fitting, the columns are named `energy`
and `intensity`. In case of a global fitting, `energy` stays `energy`
and `intensity` is extended by a `_` and column index; like: `energy`
and `intensity_1`, `intensity_2`, `intensity_...` depending on
the dataset size.
"""
if self.args["global_"]:
return df.rename(
columns={
col: ColumnNamesAPI().energy
if i == 0
else f"{ColumnNamesAPI().intensity}_{i}"
for i, col in enumerate(df.columns)
}
)
return df.rename(
columns={
df.columns[0]: ColumnNamesAPI().energy,
df.columns[1]: ColumnNamesAPI().intensity,
}
)
    def make_insight_report(self) -> None:
        """Make an insight-report of the fit statistic.

        !!! note "About Insight Report"

            The insight report based on:

            1. Configurations
            2. Statistics
            3. Variables
            4. Error-bars
            5. Correlations
            6. Covariance Matrix
            7. _Optional_: Confidence Interval

            All of the above are included in the report as dictionary in
            `args`.
        """
        self.args["fit_insights"] = fit_report_as_dict(
            self.result, modelpars=self.result.params
        )
        if self.args["conf_interval"]:
            try:
                self.args["confidence_interval"] = conf_interval(
                    self.minimizer, self.result, **self.args["conf_interval"]
                )
            except (MinimizerException, ValueError, KeyError) as exc:
                # A failed confidence interval must not abort the whole
                # post-processing; report it and continue with an empty dict.
                print(f"Error: {exc} -> No confidence interval could be calculated!")
                self.args["confidence_interval"] = {}
def make_residual_fit(self) -> None:
    r"""Attach residual and best-fit columns to the result dataframe.

    The residual is the difference between the best-fit model and the
    reference data, so the fit column is reconstructed as
    ``data + residual``. For a global fit this is done per spectrum
    (``residual_<i>`` / ``fit_<i>``) and an averaged residual is stored
    as ``residual_avg``:

    $$
    \mathrm{residual} = \mathrm{model} - \mathrm{data}
    $$
    """
    names = ColumnNamesAPI()
    extended = self.df.copy()
    if self.args["global_"]:
        # One residual column per spectrum; rows of the transposed array
        # correspond to the individual spectra.
        residuals = self.result.residual.reshape((-1, self.data_size)).T
        for index, single in enumerate(residuals, start=1):
            extended[f"{names.residual}_{index}"] = single
            extended[f"{names.fit}_{index}"] = (
                self.df[f"{names.intensity}_{index}"].to_numpy() + single
            )
        extended[f"{names.residual}_avg"] = np.mean(residuals, axis=0)
    else:
        single = self.result.residual
        extended[names.residual] = single
        extended[names.fit] = self.df[names.intensity].to_numpy() + single
    self.df = extended
def make_fit_contributions(self) -> None:
    """Attach the single model contributions of the best fit to ``df``.

    The contributions are calculated the same way for local and global
    fits; the mode is forwarded via ``args["global_"]``.
    """
    energy_axis = self.df.iloc[:, 0].to_numpy()
    self.df = calculated_model(
        params=self.result.params,
        x=energy_axis,
        df=self.df,
        global_fit=self.args["global_"],
    )
def export_correlation2args(self) -> None:
    """Store the linear correlation matrix of ``df`` in the arguments.

    The matrix is stored under ``args["linear_correlation"]`` as a
    ``split``-oriented dictionary with the keys ``index``, ``columns``,
    and ``data``. It can be restored via:

    >>> import pandas as pd
    >>> pd.DataFrame(**args["linear_correlation"])
    """
    correlation_matrix = self.df.corr()
    self.args["linear_correlation"] = correlation_matrix.to_dict(orient="split")
def export_results2args(self) -> None:
    """Store the fit-result dataframe under ``args["fit_result"]`` as a split dict."""
    results = self.df
    self.args["fit_result"] = results.to_dict(orient="split")
def export_regression_metrics2args(self) -> None:
    """Store the regression metrics of the fit in the arguments.

    The metrics are computed by ``RegressionMetrics`` on the result
    dataframe and stored under ``args["regression_metrics"]``.
    """
    metrics = RegressionMetrics(self.df)
    self.args["regression_metrics"] = metrics()
def export_desprective_statistic2args(self) -> None:
    """Store the descriptive statistics of spectra, fit, and contributions.

    The statistics (``describe`` with the 10%..90% percentiles) are stored
    under ``args["descriptive_statistic"]`` as a ``split``-oriented dict.

    NOTE(review): the method name is a typo of "descriptive" but is kept
    unchanged because it is part of the public interface.
    """
    percentiles = np.arange(0.1, 1, 0.1)
    self.args["descriptive_statistic"] = self.df.describe(
        percentiles=percentiles
    ).to_dict(orient="split")
class SaveResult:
    """Saving the result of the fitting process."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize SaveResult class.

        !!! note "About SaveResult"
            The SaveResult class is responsible for saving the results of the
            optimization process in two formats:

            1. JSON (default) for all results and meta data of the fitting
               process.
            2. CSV for the results of the optimization process, split into the
               fit results, the correlation analysis, and the error analysis.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`), the best fit and the corresponding residuum, and the
                single contributions of the model.
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
                Expected keys: ``outfile``, ``linear_correlation`` (a
                ``split``-oriented dict), and ``fit_insights["variables"]``.
        """
        self.df = df
        self.args = args

    def __call__(self) -> None:
        """Save the results as JSON summary and as CSV files."""
        self.save_as_json()
        self.save_as_csv()

    def save_as_csv(self) -> None:
        """Save the fit results to csv files.

        Three files are written, all prefixed with ``args["outfile"]``:

        1. ``*_fit.csv``: the fit results (data, best fit, residuum, and the
           single model contributions).
        2. ``*_correlation.csv``: the linear correlation matrix of all
           attributes.
        3. ``*_errors.csv``: the variables of the error analysis.
        """
        self.df.to_csv(Path(f"{self.args['outfile']}_fit.csv"), index=False)
        # `linear_correlation` is a `split`-oriented dict (keys: index,
        # columns, data); it must be rebuilt via the DataFrame constructor.
        # `pd.DataFrame.from_dict` would misread the three keys as ordinary
        # columns and write a garbled matrix.
        pd.DataFrame(**self.args["linear_correlation"]).to_csv(
            Path(f"{self.args['outfile']}_correlation.csv"),
            index=True,
            index_label="attributes",
        )
        pd.DataFrame.from_dict(self.args["fit_insights"]["variables"]).to_csv(
            Path(f"{self.args['outfile']}_errors.csv"),
            index=True,
            index_label="attributes",
        )

    def save_as_json(self) -> None:
        """Save the fitting result as json file.

        Raises:
            FileNotFoundError: If ``args["outfile"]`` is empty or missing.
        """
        if self.args["outfile"]:
            with open(
                Path(f"{self.args['outfile']}_summary.json"), "w", encoding="utf-8"
            ) as f:
                json.dump(self.args, f, indent=4)
        else:
            raise FileNotFoundError("No output file provided!")
def read_input_file(fname: Path) -> MutableMapping[str, Any]:
    """Read the input file.

    Read the input file as `toml`, `json`, or `yaml` and return its content
    as a dictionary.

    Args:
        fname (str): Name of the input file.

    Raises:
        OSError: If the input file format is not supported.

    Returns:
        dict: The input file arguments as a dictionary with additional
            information beyond the command line arguments.
    """
    fname = Path(fname)
    suffix = fname.suffix
    if suffix == ".toml":
        with open(fname, "rb") as handle:
            return tomli.load(handle)
    if suffix == ".json":
        with open(fname, encoding="utf-8") as handle:
            return json.load(handle)
    if suffix in (".yaml", ".yml"):
        with open(fname, encoding="utf-8") as handle:
            return yaml.load(handle, Loader=yaml.FullLoader)
    raise OSError(
        f"ERROR: Input file {fname} has not supported file format.\n"
        "Supported fileformats are: '*.json', '*.yaml', and '*.toml'"
    )
def load_data(args: Dict[str, str]) -> pd.DataFrame:
    """Load the data from a txt file.

    !!! note "About the data format"
        The data can be an ASCII file (txt, csv, or a user-specific but
        rational format) with a configurable delimiter. For 2d data the
        columns to use have to be defined; for global (3d) fitting all
        columns are considered as data.

    Args:
        args (Dict[str, str]): The input file arguments as a dictionary with
            additional information beyond the command line arguments.

    Returns:
        pd.DataFrame: DataFrame containing the input data (`x` and `data`).
    """
    read_kwargs = {
        "sep": args["separator"],
        "header": args["header"],
        "dtype": np.float64,
        "decimal": args["decimal"],
        "comment": args["comment"],
    }
    # Only a regular (non-global) fit restricts the read to selected columns.
    if not args["global_"]:
        read_kwargs["usecols"] = args["column"]
    try:
        return pd.read_csv(args["infile"], **read_kwargs)
    except ValueError as exc:
        print(f"Error: {exc} -> Dataframe contains non numeric data!")
        sys.exit(1)
def check_keywords_consistency(
    check_args: MutableMapping[str, Any], ref_args: Dict[str, Any]
) -> None:
    """Check if the keywords are consistent.

    Every key of ``check_args`` must also be a key of ``ref_args``. The two
    dictionaries are the reference keywords of the `cmd_line_args` and the
    `args` of the `input_file`.

    Args:
        check_args (MutableMapping[str, Any]): First dictionary to be checked.
        ref_args (Dict[str, Any]): Second dictionary to be checked.

    Raises:
        KeyError: If a keyword of ``check_args`` is unknown to ``ref_args``.
    """
    for key in check_args:
        # Membership test directly on the mapping; `.keys()` was redundant.
        if key not in ref_args:
            raise KeyError(f"ERROR: The {key} is not parameter of the `cmd-input`!")
def unicode_check(f: Any, encoding: str = "latin1") -> Any:
    """Check if the pkl file is encoded in unicode.

    Args:
        f (Any): The opened pkl file object to load from.
        encoding (str, optional): The fallback encoding used when the default
            unpickling fails with a decode error. Defaults to "latin1".

    Returns:
        Any: The unpickled payload, which can be a nested dictionary
            containing raw data, metadata, and other information.
    """
    try:
        return pickle.load(f)
    except UnicodeDecodeError:  # pragma: no cover
        return pickle.load(f, encoding=encoding)
def pkl2any(pkl_fname: Path, encoding: str = "latin1") -> Any:
    """Load a pkl file and return the data as any type of data or object.

    Args:
        pkl_fname (Path): The pkl file to load (`.pkl` or gzip-compressed
            `.pkl.gz`).
        encoding (str, optional): The encoding to use for legacy pickles.
            Defaults to "latin1".

    Raises:
        ValueError: If the file format is not supported.

    Returns:
        Any: Data or objects, which can contain various data types supported
            by pickle.
    """
    # Dispatch on the (last) suffix: gzip-compressed vs. plain pickle.
    openers = {".gz": gzip.open, ".pkl": open}
    opener = openers.get(pkl_fname.suffix)
    if opener is None:
        choices = [".pkl", ".pkl.gz"]
        raise ValueError(
            f"File format '{pkl_fname.suffix}' is not supported. "
            f"Supported file formats are: {choices}"
        )
    with opener(pkl_fname, "rb") as f:
        return unicode_check(f, encoding=encoding)
def pure_fname(fname: Path) -> Path:
    """Return the filename without any suffix.

    `Path.stem` only removes the last suffix, so `test.pkl.gz` would become
    `test.pkl` and not `test`. This helper strips suffixes repeatedly until
    none is left, avoiding the multiple-dot problem of names like
    `test.pkl.gz` or `test.tar.gz`.

    Args:
        fname (Path): The filename to be processed.

    Returns:
        Path: The filename without any suffix.
    """
    stripped = fname
    while stripped.suffix:
        stripped = stripped.parent / stripped.stem
    return stripped
<|code_end|>
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "0.16.2"
<|code_end|>
spectrafit/models.py
<|code_start|>"""Minimization models for curve fitting."""
from collections import defaultdict
from dataclasses import dataclass
from math import log
from math import pi
from math import sqrt
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from lmfit import Minimizer
from lmfit import Parameters
from numpy.typing import NDArray
from scipy.signal import find_peaks
from scipy.special import erf
from scipy.special import wofz
from scipy.stats import hmean
from spectrafit.api.tools_model import AutopeakAPI
from spectrafit.api.tools_model import GlobalFittingAPI
from spectrafit.api.tools_model import SolverModelsAPI
class DistributionModels:
    """Distribution models for the fit.

    !!! note "About distribution models"
        `DistributionModels` are wrapper functions for the distribution models.
        The overall goal is to extract from the best parameters the single
        contributions in the model. The superposition of the single
        contributions is the final model.

    !!! note "About the cumulative distribution"
        The cumulative distribution is the sum of the single contributions. The
        cumulative distribution is the model that is fitted to the data. In
        contrast to the single contributions, the cumulative distribution is
        not normalized and therefore the amplitude of the single contributions
        is not directly comparable to the amplitude of the cumulative
        distribution. Also, the cumulative distributions are consequently using
        the `fwhm` parameter instead of the `sigma` parameter.
    """

    def gaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Gaussian distribution.

        $$
        {\displaystyle g(x)={\frac {1}{\sigma {\sqrt {2\pi }}}}\exp
        ( -{\frac {1}{2}}{\frac {(x-\mu )^{2}}{\sigma ^{2}}} ) }
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Gaussian distribution.
                Defaults to 1.0.
            center (float, optional): Center of the Gaussian distribution.
                Defaults to 0.0.
            fwhmg (float, optional): Full width at half maximum (FWHM) of the
                Gaussian distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Gaussian distribution of `x` given.
        """
        sigma = fwhmg * Constants.fwhmg2sig
        return np.array(amplitude / (Constants.sq2pi * sigma)) * np.exp(
            -((1.0 * x - center) ** 2) / (2 * sigma**2)
        )

    def lorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Lorentzian distribution.

        $$
        f(x;x_{0},\gamma )={\frac {1}{\pi \gamma
        [ 1+ ( {\frac {x-x_{0}}{\gamma }})^{2} ]
        }} ={1 \over \pi \gamma } [ {\gamma ^{2} \over (x-x_{0})^{2}+\gamma ^{2}} ]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Lorentzian
                distribution. Defaults to 1.0.
            center (float, optional): Center of the Lorentzian distribution.
                Defaults to 0.0.
            fwhml (float, optional): Full width at half maximum (FWHM) of the
                Lorentzian distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Lorentzian distribution of `x` given.
        """
        sigma = fwhml * Constants.fwhml2sig
        return np.array(amplitude / (1 + ((1.0 * x - center) / sigma) ** 2)) / (
            pi * sigma
        )

    def voigt(
        self,
        x: NDArray[np.float64],
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: Optional[float] = None,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Voigt distribution.

        $$
        {\displaystyle V(x;\sigma ,\gamma )\equiv
        \int_{-\infty }^{\infty }G(x';\sigma )
        L(x-x';\gamma )\,dx'}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float, optional): Center of the Voigt distribution.
                Defaults to 0.0.
            fwhmv (float, optional): Full width at half maximum (FWHM) of the
                Voigt distribution. Defaults to 1.0.
            gamma (float, optional): Scaling factor of the complex part of the
                [Faddeeva Function](https://en.wikipedia.org/wiki/Faddeeva_function).
                Defaults to None, in which case `gamma` equals `sigma`.

        Returns:
            NDArray[np.float64]: Voigt distribution of `x` given.
        """
        sigma = fwhmv * Constants.fwhmv2sig
        if gamma is None:
            gamma = sigma
        z = (x - center + 1j * gamma) / (sigma * Constants.sq2)
        return np.array(wofz(z).real / (sigma * Constants.sq2pi))

    def pseudovoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional Pseudo-Voigt distribution.

        !!! note "See also:"
            J. Appl. Cryst. (2000). 33, 1311-1316
            https://doi.org/10.1107/S0021889800010219

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Pseudo-Voigt
                distribution. Defaults to 1.0.
            center (float, optional): Center of the Pseudo-Voigt distribution.
                Defaults to 0.0.
            fwhmg (float, optional): Full width half maximum of the Gaussian
                distribution in the Pseudo-Voigt distribution. Defaults to 1.0.
            fwhml (float, optional): Full width half maximum of the Lorentzian
                distribution in the Pseudo-Voigt distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Pseudo-Voigt distribution of `x` given.
        """
        # Effective FWHM and mixing parameter per Thompson-Cox-Hastings
        # (see reference above).
        f = np.power(
            fwhmg**5
            + 2.69269 * fwhmg**4 * fwhml
            + 2.42843 * fwhmg**3 * fwhml**2
            + 4.47163 * fwhmg**2 * fwhml**3
            + 0.07842 * fwhmg * fwhml**4
            + fwhml**5,
            0.2,
        )
        n = (
            1.36603 * (fwhml / f)
            - 0.47719 * (fwhml / f) ** 2
            + 0.11116 * (fwhml / f) ** 3
        )
        return np.array(
            n * self.lorentzian(x=x, amplitude=amplitude, center=center, fwhml=fwhml)
            + (1 - n)
            * self.gaussian(x=x, amplitude=amplitude, center=center, fwhmg=fwhmg)
        )

    def exponential(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        decay: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional exponential decay.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the exponential function.
                Defaults to 1.0.
            decay (float, optional): Decay of the exponential function.
                Defaults to 1.0.
            intercept (float, optional): Intercept of the exponential function.
                Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Exponential decay of `x` given.
        """
        return np.array(amplitude * np.exp(-x / decay) + intercept)

    def power(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        exponent: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional power function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the power function.
                Defaults to 1.0.
            exponent (float, optional): Exponent of the power function.
                Defaults to 1.0.
            intercept (float, optional): Intercept of the power function.
                Defaults to 0.0.

        Returns:
            NDArray[np.float64]: power function of `x` given.
        """
        return np.array(amplitude * np.power(x, exponent) + intercept)

    def linear(
        self,
        x: NDArray[np.float64],
        slope: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional linear function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            slope (float, optional): Slope of the linear function.
                Defaults to 1.0.
            intercept (float, optional): Intercept of the linear function.
                Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Linear function of `x` given.
        """
        return np.array(slope * x + intercept)

    def constant(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional constant value.

        Args:
            x (NDArray[np.float64]): `x`-values of the data; only its length
                determines the output size.
            amplitude (float, optional): Amplitude of the constant.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Constant value of `x` given.
        """
        # np.full is the idiomatic replacement for the former
        # np.linspace(amplitude, amplitude, len(x)) round-trip; both yield a
        # float64 array holding `amplitude` for every sample.
        return np.full(len(x), amplitude, dtype=np.float64)

    @staticmethod
    def _norm(
        x: NDArray[np.float64], center: float, sigma: float
    ) -> NDArray[np.float64]:
        """Normalize the data for step functions.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float): Center of the step function.
            sigma (float): Sigma of the step function.

        Returns:
            NDArray[np.float64]: Normalized data.
        """
        # Guard against division by (near) zero sigma.
        if abs(sigma) < 1.0e-13:
            sigma = 1.0e-13
        return np.subtract(x, center) / sigma

    def erf(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional error function.

        $$
        f(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the error function.
                Defaults to 1.0.
            center (float, optional): Center of the error function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the error function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Error function of `x` given.
        """
        return np.array(amplitude * 0.5 * (1 + erf(self._norm(x, center, sigma))))

    def heaviside(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Heaviside step function.

        $$
        f(x) = \begin{cases}
        0 & x < 0 \\
        0.5 & x = 0 \\
        1 & x > 0
        \end{cases}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Heaviside step
                function. Defaults to 1.0.
            center (float, optional): Center of the Heaviside step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the Heaviside step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Heaviside step function of `x` given.
        """
        return np.array(amplitude * 0.5 * (1 + np.sign(self._norm(x, center, sigma))))

    def atan(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional arctan step function.

        $$
        f(x) = \frac{1}{\pi} \arctan(\frac{x - c}{s})
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the arctan step function.
                Defaults to 1.0.
            center (float, optional): Center of the arctan step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the arctan step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Arctan step function of `x` given.
        """
        return np.array(
            amplitude * 0.5 * (1 + np.arctan(self._norm(x, center, sigma)) / pi)
        )

    def log(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional logarithmic step function.

        $$
        f(x) = A \cdot \frac{1}{2}
        \left[ 1 + \frac{\ln((x - c)/s)}{\pi} \right]
        $$

        !!! warning "Domain"
            The logarithm is only defined for `x > center` (with positive
            `sigma`); values at or below the center evaluate to `-inf`/`nan`.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the logarithmic step
                function. Defaults to 1.0.
            center (float, optional): Center of the logarithmic step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the logarithmic step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Logarithmic step function of `x` given.
        """
        return np.array(
            amplitude * 0.5 * (1 + np.log(self._norm(x, center, sigma)) / pi)
        )

    def cgaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Gaussian function.

        $$
        f(x) = \frac{1}{2} \left[1 + erf\left(\frac{x - c}{s \sqrt{2}}\right)\right]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Gaussian function.
                Defaults to 1.0.
            center (float, optional): Center of the Gaussian function.
                Defaults to 0.0.
            fwhmg (float, optional): Full width at half maximum of the Gaussian
                function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Gaussian function of `x` given.
        """
        sigma = fwhmg * Constants.fwhmg2sig
        return np.array(
            amplitude * 0.5 * (1 + erf((x - center) / (sigma * np.sqrt(2.0))))
        )

    def clorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Lorentzian function.

        $$
        f(x) = \frac{1}{\pi} \arctan\left(\frac{x - c}{s}\right) + \frac{1}{2}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Lorentzian function.
                Defaults to 1.0.
            center (float, optional): Center of the Lorentzian function.
                Defaults to 0.0.
            fwhml (float, optional): Full width at half maximum of the
                Lorentzian function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Lorentzian function of `x` given.
        """
        sigma = fwhml * Constants.fwhml2sig
        return np.array(amplitude * (np.arctan((x - center) / sigma) / pi) + 0.5)

    def cvoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Voigt function.

        $$
        f(x) = \frac{1}{2} \left[1 + erf\left(\frac{x - c}{s \sqrt{2}}\right)\right]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Voigt function.
                Defaults to 1.0.
            center (float, optional): Center of the Voigt function.
                Defaults to 0.0.
            fwhmv (float, optional): Full width at half maximum of the Voigt
                function. Defaults to 1.0.
            gamma (float, optional): Gamma of the Voigt function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Voigt function of `x` given.
        """
        sigma = fwhmv * Constants.fwhmv2sig
        return np.array(
            amplitude
            * 0.5
            * (1 + erf((x - center) / (sigma * np.sqrt(2.0))))
            * np.exp(-(((x - center) / gamma) ** 2))
        )
@dataclass(frozen=True)
class ReferenceKeys:
    """Reference keys for model fitting and peak detection."""

    # Every public callable of `DistributionModels` is a legal model name.
    __models__ = [
        func
        for func in dir(DistributionModels)
        if callable(getattr(DistributionModels, func)) and not func.startswith("_")
    ]
    # Subset of models eligible for automatic peak detection.
    __automodels__ = [
        "gaussian",
        "lorentzian",
        "voigt",
        "pseudovoigt",
    ]

    def model_check(self, model: str) -> None:
        """Check if model is available.

        Args:
            model (str): Model name; only the part before the first
                underscore is checked, so suffixed names like
                `gaussian_1` are accepted.

        Raises:
            NotImplementedError: If the model is not supported.
        """
        if model.split("_")[0] not in self.__models__:
            raise NotImplementedError(f"{model} is not supported!")

    def automodel_check(self, model: str) -> None:
        """Check if model is available.

        Args:
            model (str): Auto Model name (gaussian, lorentzian, voigt, or
                pseudovoigt).

        Raises:
            KeyError: If the model is not supported.
        """
        if model not in self.__automodels__:
            raise KeyError(f"{model} is not supported!")

    def detection_check(self, args: Dict[str, Any]) -> None:
        """Check if detection is available.

        Args:
            args (Dict[str, Any]): The input file arguments as a dictionary
                with additional information beyond the command line arguments.

        Raises:
            KeyError: If the key is not parameter of the
                `scipy.signal.find_peaks` function. This will be checked via
                `pydantic` in `spectrafit.api`.
        """
        AutopeakAPI(**args)
@dataclass(frozen=True)
class Constants:
    r"""Mathematical constants for the curve models.

    !!! info "Constants"
        1. Natural logarithm of 2

            $$
            ln2 = \log{2}
            $$

        2. Square root of 2 times pi

            $$
            sq2pi = \sqrt{2 \pi}
            $$

        3. Square root of pi

            $$
            sqpi = \sqrt{\pi}
            $$

        4. Square root of 2

            $$
            sq2 = \sqrt{2}
            $$

        5. Full width at half maximum to sigma for Gaussian

            $$
            fwhmg2sig = \frac{1}{ 2 \sqrt{2\log{2}}}
            $$

        6. Full width at half maximum to sigma for Lorentzian

            $$
            fwhml2sig = \frac{1}{2}
            $$

        7. Full width at half maximum to sigma for Voigt according to the
           article by Olivero and Longbothum[^1], check also
           [XPSLibary website](https://xpslibrary.com/voigt-peak-shape/).

            $$
            fwhm_{\text{Voigt}} \approx 0.5346 \cdot fwhm_{\text{Gaussian}} +
            \sqrt{ 0.2166 fwhm_{\text{Lorentzian}}^2 + fwhm_{\text{Gaussian}}^2 }
            $$

            In case of equal FWHM for Gaussian and Lorentzian, the Voigt FWHM
            can be defined as:

            $$
            fwhm_{\text{Voigt}} \approx 1.0692 + 2 \sqrt{0.2166 + 2 \ln{2}}
            \cdot \sigma
            $$

            $$
            fwhmv2sig = \frac{1}{fwhm_{\text{Voigt}}}
            $$

    [^1]:
        J.J. Olivero, R.L. Longbothum,
        _Empirical fits to the Voigt line width: A brief review_,
        **Journal of Quantitative Spectroscopy and Radiative Transfer**,
        Volume 17, Issue 2, 1977, Pages 233-236, ISSN 0022-4073,
        https://doi.org/10.1016/0022-4073(77)90161-3.
    """

    ln2 = log(2.0)  # natural logarithm of 2
    sq2pi = sqrt(2.0 * pi)  # sqrt(2*pi), Gaussian normalization factor
    sqpi = sqrt(pi)  # sqrt(pi)
    sq2 = sqrt(2.0)  # sqrt(2)
    fwhmg2sig = 1 / (2.0 * sqrt(2.0 * log(2.0)))  # Gaussian FWHM -> sigma
    fwhml2sig = 1 / 2.0  # Lorentzian FWHM -> sigma (half width)
    fwhmv2sig = 1 / (2 * 0.5346 + 2 * sqrt(0.2166 + log(2) * 2))  # Voigt FWHM -> sigma
class AutoPeakDetection:
"""Automatic detection of peaks in a spectrum."""
def __init__(
    self,
    x: NDArray[np.float64],
    data: NDArray[np.float64],
    args: Dict[str, Any],
) -> None:
    """Initialize the AutoPeakDetection class.

    Args:
        x (NDArray[np.float64]): `x`-values of the data.
        data (NDArray[np.float64]): `y`-values of the data as 1d-array.
        args (Dict[str, Any]): The input file arguments as a dictionary with
            additional information beyond the command line arguments.
            Must contain an `autopeak` entry, which may be a bool (use the
            estimated defaults) or a dict of `find_peaks` overrides.
    """
    self.x = x
    self.data = data
    # Only the `autopeak` section of the arguments drives the detection.
    self._args = args["autopeak"]
@staticmethod
def check_key_exists(
    key: str, args: Dict[str, Any], value: Union[float, Tuple[Any, Any]]
) -> Any:
    """Check if a key exists in a dictionary.

    Please check for the reference key also [scipy.signal.find_peaks][1].

    [1]:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html

    Args:
        key (str): Reference key of `scipy.signal.find_peaks`.
        args (Dict[str, Any]): Reference values of `scipy.signal.find_peaks`;
            if not defined, the estimated default is used.
        value (Union[float, Tuple[float, float]]): Default value for the
            reference key.

    Returns:
        Any: The reference value for `scipy.signal.find_peaks`.
    """
    # Fall back to the estimated default when the user did not override it.
    return args.get(key, value)
@property
def estimate_height(self) -> Tuple[float, float]:
    r"""Estimate the initial height based on an inverse noise ratio of a signal.

    !!! info "About the estimation of the height"
        The lower end of the height is the inverse noise ratio of the `data`,
        and the upper limit is the maximum value of the `data`. The noise
        ratio of the `data` is based on the original implementation by
        `SciPy`:

        ```python
        def signaltonoise(a, axis=0, ddof=0):
            a = np.asanyarray(a)
            m = a.mean(axis)
            sd = a.std(axis=axis, ddof=ddof)
            return np.where(sd == 0, 0, m / sd)
        ```

    Returns:
        Tuple[float, float]: Tuple of the inverse signal to noise ratio and
            the maximum value of the `data`.
    """
    # NOTE(review): `1 - mean/std` can become negative (mean > std) or is
    # undefined for zero std -- confirm intended for such inputs.
    return 1 - self.data.mean() / self.data.std(), self.data.max()
@property
def estimate_threshold(self) -> Tuple[float, float]:
    """Estimate the threshold value for the peak detection.

    Returns:
        Tuple[float, float]: Minimum and maximum value of the spectrum
            `data`, respectively, `intensity`.
    """
    return self.data.min(), self.data.max()
@property
def estimate_distance(self) -> float:
    """Estimate the initial distance between peaks.

    Returns:
        float: Estimated distance between peaks, never below 1.0
            (presumably because `scipy.signal.find_peaks` requires
            `distance >= 1` -- TODO confirm).
    """
    # Smallest spacing of the energy axis, floored at one sample.
    min_step = np.diff(self.x).min()
    return max(min_step, 1.0)
@property
def estimate_prominence(self) -> Tuple[float, float]:
    """Estimate the prominence of a peak.

    !!! info "About the estimation of the prominence"
        The prominence is the difference between the height of the peak and
        the bottom. To get an estimate of the prominence, the height of the
        peak is taken as the maximum value of the `data` and the bottom as
        the harmonic mean of the `data`.

    Returns:
        Tuple[float, float]: Tuple of the harmonic mean and maximum value of
            `data`.
    """
    try:
        return hmean(self.data), self.data.max()
    except ValueError as exc:
        # hmean rejects non-positive values; fall back to the arithmetic mean.
        print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
        return self.data.mean(), self.data.max()
@property
def estimated_width(self) -> Tuple[float, float]:
    """Estimate the width of a peak.

    !!! info "About the estimation of the width"
        The width of a peak is estimated for a lower and an upper end. For
        the lower end, the minimum stepsize is used. For the upper end, half
        the distance on the `x`-axis between the maximum and the minimum of
        the `data` is used.

    Returns:
        Tuple[float, float]: Estimated width lower and upper end of the
            peaks.
    """
    return (
        np.diff(self.x).min(),
        np.abs(self.x[self.data.argmax()] - self.x[self.data.argmin()]) / 2,
    )
@property
def estimated_rel_height(self) -> float:
    """Estimate the relative height of a peak.

    !!! info "About the estimation of the relative height"
        The relative height of a peak is approximated by the difference of
        the harmonic mean value of the `data` and the minimum value of the
        `data` divided by the factor of `4`. In case of negative ratios, the
        value will be set to `Zero`.

    Returns:
        float: Estimated relative height of a peak (clamped to >= 0).
    """
    try:
        rel_height = (hmean(self.data) - self.data.min()) / 4
    except ValueError as exc:
        # hmean rejects non-positive values; fall back to the arithmetic mean.
        print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
        rel_height = (self.data.mean() - self.data.min()) / 4
    return rel_height if rel_height > 0 else 0.0
@property
def estimated_wlen(self) -> float:
    r"""Estimate the window length for the peak detection.

    !!! info "About the estimation of the window length"
        The window length for the peak detection is defined as 1% of the
        length of the `data` (the number of samples divided by 100). In case
        of a window length smaller than 1, the window length will be set to
        a numerical value just above 1, defined by `1 + 1e-9`.

    Returns:
        float: Estimated window length, always strictly greater than 1.
    """
    wlen = self.data.size / 100
    return wlen if wlen > 1.0 else 1 + 1e-9
@property
def estimated_plateau_size(self) -> Tuple[float, float]:
    """Estimate the plateau size for the peak detection.

    Returns:
        Tuple[float, float]: Plateau-size bounds — `zero` as the lower end and
            the maximum of `x` as the upper end.
    """
    plateau_bounds = (0.0, self.x.max())
    return plateau_bounds
def initialize_peak_detection(self) -> None:
    """Initialize the peak detection.

    !!! note "Initialize the peak detection"

        If the `initialize` argument is the boolean ``True``, all peak-detection
        settings are taken from the estimated defaults. If it is a dictionary,
        it is validated first and every setting is taken from the dictionary
        when present, falling back to the estimated default otherwise.

    Raises:
        TypeError: If the `initialize` attribute is not of type `bool` or `dict`.
    """
    if isinstance(self._args, bool):
        self.default_values()
        return
    if not isinstance(self._args, dict):
        raise TypeError(
            f"The type of the `args` is not supported: {type(self._args)}"
        )
    ReferenceKeys().detection_check(self._args)
    # Table of setting name -> estimated fallback value; `plateau_size`
    # intentionally defaults to the literal 0.0 (matching `default_values`).
    fallbacks: Dict[str, Any] = {
        "height": self.estimate_height,
        "threshold": self.estimate_threshold,
        "distance": self.estimate_distance,
        "prominence": self.estimate_prominence,
        "width": self.estimated_width,
        "wlen": self.estimated_wlen,
        "rel_height": self.estimated_rel_height,
        "plateau_size": 0.0,
    }
    for setting, fallback in fallbacks.items():
        setattr(
            self,
            setting,
            self.check_key_exists(key=setting, args=self._args, value=fallback),
        )
def default_values(self) -> None:
    """Reset every peak-detection setting to its estimated default value."""
    estimated_defaults = (
        ("height", self.estimate_height),
        ("threshold", self.estimate_threshold),
        ("distance", self.estimate_distance),
        ("prominence", self.estimate_prominence),
        ("width", self.estimated_width),
        ("wlen", self.estimated_wlen),
        ("rel_height", self.estimated_rel_height),
        ("plateau_size", 0),
    )
    for setting, value in estimated_defaults:
        setattr(self, setting, value)
def __autodetect__(self) -> Any:
    """Run `scipy.signal.find_peaks` with the currently stored settings.

    Returns:
        Any: Tuple of the detected peak positions and their properties, as
            returned by `scipy.signal.find_peaks`.
    """
    detection_settings = {
        "height": self.height,
        "threshold": self.threshold,
        "distance": self.distance,
        "prominence": self.prominence,
        "width": self.width,
        "wlen": self.wlen,
        "rel_height": self.rel_height,
        "plateau_size": self.plateau_size,
    }
    return find_peaks(self.data, **detection_settings)
class ModelParameters(AutoPeakDetection):
    """Class to define the model parameters."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the model parameters.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]):
                Nested arguments dictionary for the model based on **one** or **two**
                `int` keys depending if global fitting parameters will be explicitly
                defined or not.

        !!! note "About `args` for models"

            The `args` dictionary is used to define the model parameters. The total
            nested dictionary structure is as follows:

            ```python
            args: Dict[str, Dict[int, Dict[str, Dict[str, Union[str, int, float]]]]]
            ```

        !!! info "About the fitting options"

            In general, there are two options for the fitting possible:

            1. `Classic fitting` or `local fitting`, where the parameters are
               defined for a 2D spectrum.
            2. `Global fitting`, where the parameters are defined for a 3D
               spectrum. Here, the parameters can be automatically defined for
               each column on the basis of the initial parameters or they can
               be completely defined by the user. The `global fitting`
               definition starts at `1`, similar to the peaks attributes
               notation.
        """
        # First column is the energy axis; all remaining columns are spectra.
        self.col_len = df.shape[1] - 1
        self.args = args
        self.params = Parameters()
        self.x, self.data = self.df_to_numvalues(df=df, args=args)
        super().__init__(self.x, self.data, self.args)

    def df_to_numvalues(
        self, df: pd.DataFrame, args: Dict[str, Any]
    ) -> Tuple[NDArray[np.float64], NDArray[np.float64]]:
        """Transform the dataframe to numeric values of `x` and `data`.

        !!! note "About the dataframe to numeric values"

            For a regular (2D) fit, `x` is the configured energy column and
            `data` the configured intensity column. For a global (3D) fit,
            `data` contains the intensity values of *all* non-energy columns.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            Tuple[NDArray[np.float64], NDArray[np.float64]]: Tuple of `x` and
                 `data` as numpy arrays.
        """
        energy_column = args["column"][0]
        if args["global_"]:
            return (
                df[energy_column].to_numpy(),
                df.loc[:, df.columns != energy_column].to_numpy(),
            )
        return (df[energy_column].to_numpy(), df[args["column"][1]].to_numpy())

    @property
    def return_params(self) -> Parameters:
        """Return the `class` representation of the model parameters.

        Returns:
            Parameters: Model parameters class.
        """
        self.__perform__()
        return self.params

    def __str__(self) -> str:
        """Return the `string` representation of the model parameters.

        Returns:
            str: String representation of the model parameters.
        """
        self.__perform__()
        return str(self.params)

    def __perform__(self) -> None:
        """Perform the model parameter definition.

        Raises:
            KeyError: Global fitting in combination with automatic peak
                detection is not implemented yet.
        """
        if self.args["global_"] == 0 and not self.args["autopeak"]:
            self.define_parameters()
        elif self.args["global_"] == 1 and not self.args["autopeak"]:
            self.define_parameters_global()
        elif self.args["global_"] == 2 and not self.args["autopeak"]:
            self.define_parameters_global_pre()
        elif self.args["global_"] == 0:
            self.initialize_peak_detection()
            self.define_parameters_auto()
        elif self.args["global_"] in [1, 2]:
            raise KeyError(
                "Global fitting mode with automatic peak detection "
                "is not supported yet."
            )

    def define_parameters_auto(self) -> None:
        """Auto define the model parameters for local fitting.

        Peaks are detected via `__autodetect__` and every detected peak is
        translated into amplitude, center, and FWHM parameters of the selected
        line shape. The previously fourfold-duplicated per-model branches are
        collapsed into a single loop; the models differ only in their FWHM
        parameter(s).
        """
        positions, properties = self.__autodetect__()
        if (
            not isinstance(self.args["autopeak"], bool)
            and "model_type" in self.args["autopeak"]
        ):
            _model = self.args["autopeak"]["model_type"].lower()
            ReferenceKeys().automodel_check(model=_model)
            models = _model
        else:
            models = "gaussian"
        # Single-FWHM models mapped to their parameter suffix; `pseudovoigt`
        # is handled separately because it carries both a Gaussian and a
        # Lorentzian FWHM parameter.
        fwhm_suffix = {"gaussian": "fwhmg", "lorentzian": "fwhml", "voigt": "fwhmv"}
        if models in fwhm_suffix or models == "pseudovoigt":
            for i, (_cent, _amp, _fhmw) in enumerate(
                zip(
                    self.x[positions],
                    properties["peak_heights"],
                    properties["widths"],
                ),
                start=1,
            ):
                self.params.add(
                    f"{models}_amplitude_{i}",
                    value=_amp,
                    min=-np.abs(1.25 * _amp),
                    max=np.abs(1.25 * _amp),
                    vary=True,
                )
                self.params.add(
                    f"{models}_center_{i}",
                    value=_cent,
                    min=0.5 * _cent,
                    max=2 * _cent,
                    vary=True,
                )
                if models == "pseudovoigt":
                    self.params.add(
                        f"{models}_fwhmg_{i}",
                        value=0.5 * _fhmw,
                        min=0,
                        max=_fhmw,
                        vary=True,
                    )
                    self.params.add(
                        f"{models}_fwhml_{i}",
                        value=0.5 * _fhmw,
                        min=0,
                        max=2 * _fhmw,
                        vary=True,
                    )
                else:
                    self.params.add(
                        f"{models}_{fwhm_suffix[models]}_{i}",
                        value=_fhmw,
                        min=0,
                        max=2 * _fhmw,
                        vary=True,
                    )
        # Keep a serializable record of the auto-generated model so it can be
        # written into the report/output.
        self.args["auto_generated_models"] = {
            "models": {
                key: {
                    "value": self.params[key].value,
                    "min": self.params[key].min,
                    "max": self.params[key].max,
                    "vary": self.params[key].vary,
                }
                for key in self.params
            },
            "positions": positions.tolist(),
            "properties": {key: value.tolist() for key, value in properties.items()},
        }

    def define_parameters(self) -> None:
        """Define the input parameters for a `params`-dictionary for classic fitting."""
        for key_1, value_1 in self.args["peaks"].items():
            self.define_parameters_loop(key_1=key_1, value_1=value_1)

    def define_parameters_loop(self, key_1: str, value_1: Dict[str, Any]) -> None:
        """Loop through the first level (peak index) of the input parameters.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            value_1 (Dict[str, Any]): The value of the first level of the input
                dictionary.
        """
        for key_2, value_2 in value_1.items():
            self.define_parameters_loop_2(key_1=key_1, key_2=key_2, value_2=value_2)

    def define_parameters_loop_2(
        self, key_1: str, key_2: str, value_2: Dict[str, Any]
    ) -> None:
        """Loop through the second level (model name) of the input parameters.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            value_2 (Dict[str, Any]): The value of the second level of the input
                dictionary.
        """
        for key_3, value_3 in value_2.items():
            self.define_parameters_loop_3(
                key_1=key_1, key_2=key_2, key_3=key_3, value_3=value_3
            )

    def define_parameters_loop_3(
        self, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
    ) -> None:
        """Add a single parameter named `<model>_<attribute>_<peak-index>`.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            key_3 (str): The key of the third level of the input dictionary.
            value_3 (Dict[str, Any]): The value of the third level of the input
                dictionary.
        """
        self.params.add(f"{key_2}_{key_3}_{key_1}", **value_3)

    def define_parameters_global(self) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting."""
        for col_i in range(self.col_len):
            for key_1, value_1 in self.args["peaks"].items():
                for key_2, value_2 in value_1.items():
                    for key_3, value_3 in value_2.items():
                        self._define_parameter(
                            col_i=col_i,
                            key_1=key_1,
                            key_2=key_2,
                            key_3=key_3,
                            value_3=value_3,
                        )

    def _define_parameter(
        self, col_i: int, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
    ) -> None:
        """Define one input parameter of a `params`-dictionary for global fitting.

        The first spectrum (``col_i == 0``) receives fully specified
        parameters; every further spectrum reuses the first spectrum's value
        through an `expr` constraint — except the amplitude, which stays free
        per spectrum.

        Args:
            col_i (int): The column index.
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            key_3 (str): The key of the third level of the input dictionary.
            value_3 (Dict[str, Any]): The value of the third level of the input
                dictionary.
        """
        if col_i:
            if key_3 != "amplitude":
                self.params.add(
                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
                    expr=f"{key_2}_{key_3}_{key_1}_1",
                )
            else:
                self.params.add(
                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
                    **value_3,
                )
        else:
            self.params.add(f"{key_2}_{key_3}_{key_1}_1", **value_3)

    def define_parameters_global_pre(self) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting.

        !!! warning "About `params` for global fitting"

            `define_parameters_global_pre` requires a fully defined
            `params`-dictionary in the json, toml, or yaml file input. This means:

            1. Number of the spectra must be defined.
            2. Number of the peaks must be defined.
            3. Number of the parameters must be defined.
            4. The parameters must be defined.
        """
        for key_1, value_1 in self.args["peaks"].items():
            for key_2, value_2 in value_1.items():
                for key_3, value_3 in value_2.items():
                    for key_4, value_4 in value_3.items():
                        self.params.add(f"{key_3}_{key_4}_{key_2}_{key_1}", **value_4)
class SolverModels(ModelParameters):
    """Solving models for 2D and 3D data sets.

    !!! hint "Solver Modes"

        * `"2D"`: Solve 2D models via the classic `lmfit` function.
        * `"3D"`: Solve 3D models via global fit. For the `global-fitting`
          procedure, the `lmfit` function is used to solve the models with an
          extended set of parameters.
    """

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the solver modes.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        super().__init__(df=df, args=args)
        # Validated solver and global-fitting settings from the API models.
        self.args_solver = SolverModelsAPI(**args).dict()
        self.args_global = GlobalFittingAPI(**args).dict()
        # Accessing `return_params` triggers `__perform__` in the parent class.
        self.params = self.return_params

    def __call__(self) -> Tuple[Minimizer, Any]:
        """Solve the fitting model.

        Returns:
            Tuple[Minimizer, Any]: Minimizer class and the fitting results.
        """
        # Pick the residual function depending on local vs. global fitting;
        # both share the same minimizer and optimizer settings.
        if self.args_global["global_"]:
            minimizer = Minimizer(
                self.solve_global_fitting,
                params=self.params,
                fcn_args=(self.x, self.data),
                **self.args_solver["minimizer"],
            )
        else:
            minimizer = Minimizer(
                self.solve_local_fitting,
                params=self.params,
                fcn_args=(self.x, self.data),
                **self.args_solver["minimizer"],
            )
        return (minimizer, minimizer.minimize(**self.args_solver["optimizer"]))

    @staticmethod
    def solve_local_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Solving the fitting problem.

        !!! note "About implemented models"

            `solve_local_fitting` is a wrapper function for calling the
            implemented models. Based on the `params` dictionary, the function
            calls the corresponding models and merges them to the general model
            which will be optimized by the `lmfit`-optimizer.
            Currently the following models are supported:

            - [Gaussian](https://en.wikipedia.org/wiki/Gaussian_function)
            - [Lorentzian](https://en.wikipedia.org/wiki/Cauchy_distribution)
              also known as Cauchy distribution
            - [Voigt](https://en.wikipedia.org/wiki/Voigt_profile)
            - [Pseudo Voigt][1]
            - Exponential
            - [power][2] (also known as Log-parabola or just power)
            - Linear
            - Constant
            - [Error Function](https://en.wikipedia.org/wiki/Error_function)
            - [Arcus Tangens][3]
            - Logarithmic

            [1]: https://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_approximation
            [2]: https://en.wikipedia.org/wiki/Power_law
            [3]: https://en.wikipedia.org/wiki/Inverse_trigonometric_functions

        Args:
            params (Dict[str, Parameters]): The best optimized parameters of the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 1d-array.

        Returns:
            NDArray[np.float64]: The residual (model minus data) for the
                proposed model.
        """
        val = np.zeros(x.shape)
        # Parameter names follow `<model>_<attribute>_<peak-index>`; group all
        # attributes of one (model, peak) pair so each distribution function is
        # called exactly once with its full keyword set.
        peak_kwargs: Dict[Tuple[str, str], Parameters] = defaultdict(dict)
        for model in params:
            model = model.lower()
            ReferenceKeys().model_check(model=model)
            c_name = model.split("_")
            peak_kwargs[(c_name[0], c_name[2])][c_name[1]] = params[model]
        # Sum all single-model contributions into the total model.
        for key, _kwarg in peak_kwargs.items():
            val += getattr(DistributionModels(), key[0])(x, **_kwarg)
        return val - data

    @staticmethod
    def solve_global_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Solving the fitting for the global problem.

        !!! note "About implemented models"

            `solve_global_fitting` is the global solution of
            `solve_local_fitting`, a wrapper function for calling the
            implemented models. For the kind of supported models see
            `solve_local_fitting`.

        !!! note "About the global solution"

            The global solution is a solution for the problem, where the
            `x`-values is the energy, but the y-values are the intensities,
            which have to be fitted as one unit. For this reason, the residual
            is calculated as the difference between all the y-values and the
            global proposed solution. Later the residual has to be flattened to
            a 1-dimensional array and minimized by the `lmfit`-optimizer.

        Args:
            params (Dict[str, Parameters]): The best optimized parameters of the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 2D-array.

        Returns:
            NDArray[np.float64]: The flattened residual (model minus data) for
                the proposed global model.
        """
        val = np.zeros(data.shape)
        # Parameter names follow `<model>_<attribute>_<peak-index>_<spectrum>`;
        # group by (model, peak, spectrum) so each spectrum column gets its own
        # set of contributions.
        peak_kwargs: Dict[Tuple[str, str, str], Parameters] = defaultdict(dict)
        for model in params:
            model = model.lower()
            ReferenceKeys().model_check(model=model)
            c_name = model.split("_")
            peak_kwargs[(c_name[0], c_name[2], c_name[3])][c_name[1]] = params[model]
        for key, _kwarg in peak_kwargs.items():
            # Spectrum index in the parameter name is 1-based.
            i = int(key[2]) - 1
            val[:, i] += getattr(DistributionModels(), key[0])(x, **_kwarg)
        val -= data
        return val.flatten()
def calculated_model(
    params: Dict[str, Parameters],
    x: NDArray[np.float64],
    df: pd.DataFrame,
    global_fit: int,
) -> pd.DataFrame:
    r"""Calculate the single contributions of the models and add them to the dataframe.

    !!! note "About calculated models"

        `calculated_model` is a wrapper similar to `solve_local_fitting` /
        `solve_global_fitting`. Instead of the summed best fit, it evaluates
        each (model, peak) contribution separately and appends it as a new
        column to the dataframe, since `lmfit` itself only exposes the combined
        best fit.

    Args:
        params (Dict[str, Parameters]): The best optimized parameters of the fit.
        x (NDArray[np.float64]): `x`-values of the data.
        df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
             as well as the best fit and the corresponding residuum. Hence, it
             will be extended by the single contribution of the model.
        global_fit (int): If 1 or 2, the model is calculated for the global fit.

    Returns:
        pd.DataFrame: Extended dataframe containing the single contributions of
             the models.
    """
    # Group parameters by (model, peak[, spectrum]) — names are of the form
    # `<model>_<attribute>_<peak-index>` plus a trailing spectrum index when
    # a global fit was performed.
    contributions: Dict[Any, Parameters] = defaultdict(dict)
    for name in params:
        name = name.lower()
        ReferenceKeys().model_check(model=name)
        parts = name.split("_")
        group = (parts[0], parts[2], parts[3]) if global_fit else (parts[0], parts[2])
        contributions[group][parts[1]] = params[name]
    result = df.copy()
    for group, kwargs in contributions.items():
        # Column name mirrors the grouping key, e.g. `gaussian_1` or
        # `gaussian_1_2` for a global fit.
        result["_".join(group)] = getattr(DistributionModels(), group[0])(x, **kwargs)
    return result
<|code_end|>
spectrafit/tools.py
<|code_start|>"""Collection of essential tools for running SpectraFit."""
import gzip
import json
import pickle
import sys
from pathlib import Path
from typing import Any
from typing import Dict
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tomli
import yaml
from lmfit import Minimizer
from lmfit import conf_interval
from lmfit.minimizer import MinimizerException
from spectrafit.api.tools_model import ColumnNamesAPI
from spectrafit.models import calculated_model
from spectrafit.report import RegressionMetrics
from spectrafit.report import fit_report_as_dict
class PreProcessing:
    """Summarized all pre-processing-filters together."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize PreProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.df = df
        self.args = args

    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Apply all pre-processing-filters.

        Returns:
            pd.DataFrame: DataFrame containing the input data (`x` and `data`),
                 which are optionally:

                 1. shrinked to a given range
                 2. shifted
                 3. linear oversampled
                 4. smoothed
            Dict[str, Any]: Adding a descriptive statistics to the input
                 dictionary.
        """
        df_copy: pd.DataFrame = self.df.copy()
        # Record descriptive statistics of the raw data before any filter runs.
        self.args["data_statistic"] = df_copy.describe(
            percentiles=np.arange(0.1, 1.0, 0.1)
        ).to_dict(orient="split")
        try:
            if isinstance(self.args["energy_start"], (int, float)) or isinstance(
                self.args["energy_stop"], (int, float)
            ):
                df_copy = self.energy_range(df_copy, self.args)
            if self.args["shift"]:
                df_copy = self.energy_shift(df_copy, self.args)
            if self.args["oversampling"]:
                df_copy = self.oversampling(df_copy, self.args)
            if self.args["smooth"]:
                df_copy = self.smooth_signal(df_copy, self.args)
        except KeyError as exc:
            # A missing column/argument is fatal for the whole run.
            print(f"KeyError: {exc} is not part of the dataframe!")
            sys.exit(1)
        return (df_copy, self.args)

    @staticmethod
    def energy_range(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Select the energy range for fitting.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                 (`x` and `data`), which are shrinked according to the energy
                 range. If neither bound is numeric, the unfiltered copy is
                 returned.
        """
        energy_start: Union[int, float] = args["energy_start"]
        energy_stop: Union[int, float] = args["energy_stop"]
        df_copy: pd.DataFrame = df.copy()
        has_start = isinstance(energy_start, (int, float))
        has_stop = isinstance(energy_stop, (int, float))
        if has_start and has_stop:
            return df_copy.loc[
                (df[args["column"][0]] >= energy_start)
                & (df[args["column"][0]] <= energy_stop)
            ]
        if has_start:
            return df_copy.loc[df[args["column"][0]] >= energy_start]
        if has_stop:
            return df_copy.loc[df[args["column"][0]] <= energy_stop]
        # Fix: previously the function fell through and returned `None`
        # implicitly when neither bound was numeric; return the unfiltered
        # copy instead so callers always receive a DataFrame.
        return df_copy

    @staticmethod
    def energy_shift(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Shift the energy axis by a given value.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                 (`x` and `data`), which are energy-shifted by the given value.
        """
        df_copy: pd.DataFrame = df.copy()
        df_copy.loc[:, args["column"][0]] = (
            df[args["column"][0]].to_numpy() + args["shift"]
        )
        return df_copy

    @staticmethod
    def oversampling(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Oversampling the data to increase the resolution of the data.

        !!! note "About Oversampling"

            In this implementation of oversampling, the data is oversampled by
            the factor of 5. In case of data with only a few points, the
            increased resolution should allow to easier solve the optimization
            problem. The oversampling is based on simple linear interpolation.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                 (`x` and `data`), which are oversampled by the factor of 5.
        """
        x_values = np.linspace(
            df[args["column"][0]].min(),
            df[args["column"][0]].max(),
            5 * df.shape[0],
        )
        y_values = np.interp(
            x_values,
            df[args["column"][0]].to_numpy(),
            df[args["column"][1]].to_numpy(),
        )
        return pd.DataFrame({args["column"][0]: x_values, args["column"][1]: y_values})

    @staticmethod
    def smooth_signal(df: pd.DataFrame, args: Dict[str, Any]) -> pd.DataFrame:
        """Smooth the intensity values.

        The intensity column is smoothed by convolution with a normalized box
        window of width `args["smooth"]` (moving average, `mode="same"`).

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            pd.DataFrame: DataFrame containing the `optimized` input data
                 (`x` and `data`), which are smoothed by the given value.
        """
        box = np.ones(args["smooth"]) / args["smooth"]
        df_copy: pd.DataFrame = df.copy()
        df_copy.loc[:, args["column"][1]] = np.convolve(
            df[args["column"][1]].to_numpy(), box, mode="same"
        )
        return df_copy
class PostProcessing:
    """Post-processing of the dataframe."""

    def __init__(
        self, df: pd.DataFrame, args: Dict[str, Any], minimizer: Minimizer, result: Any
    ) -> None:
        """Initialize PostProcessing class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
                as well as the best fit and the corresponding residuum. Hence, it
                will be extended by the single contribution of the model.
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
            minimizer (Minimizer): The minimizer class.
            result (Any): The result of the minimization of the best fit.
        """
        self.args = args
        # Normalize the column names first; every later step relies on the
        # `energy`/`intensity` naming from `ColumnNamesAPI`.
        self.df = self.rename_columns(df=df)
        self.minimizer = minimizer
        self.result = result
        # Number of spectra for a global fit; `None` for a regular 2D fit.
        self.data_size = self.check_global_fitting()

    def __call__(self) -> Tuple[pd.DataFrame, Dict[str, Any]]:
        """Call the post-processing.

        Runs the report, residual/fit, and contribution steps in order and
        then exports all results into `self.args`.

        Returns:
            Tuple[pd.DataFrame, Dict[str, Any]]: The extended dataframe and the
                enriched arguments dictionary.
        """
        self.make_insight_report()
        self.make_residual_fit()
        self.make_fit_contributions()
        self.export_correlation2args()
        self.export_results2args()
        self.export_regression_metrics2args()
        self.export_desprective_statistic2args()
        return (self.df, self.args)

    def check_global_fitting(self) -> Optional[int]:
        """Check if the global fitting is performed.

        !!! note "About Global Fitting"

            In case of the global fitting, the data is extended by the single
            contribution of the model.

        Returns:
            Optional[int]: The number of spectra of the global fitting, derived
                from the largest trailing spectrum index in the parameter
                names; `None` for a regular fit.
        """
        if self.args["global_"]:
            return max(
                int(self.result.params[i].name.split("_")[-1])
                for i in self.result.params
            )
        return None

    def rename_columns(self, df: pd.DataFrame) -> pd.DataFrame:
        """Rename the columns of the dataframe.

        Rename the columns of the dataframe to the names defined in the input file.

        Args:
            df (pd.DataFrame): DataFrame containing the original input data, which
                are individually pre-named.

        Returns:
            pd.DataFrame: DataFrame containing renamed columns. In case of a
                regular fitting, the columns are named `energy` and `intensity`.
                In case of a global fitting, `energy` stays `energy` and
                `intensity` is extended by a `_` and a 1-based column index;
                like: `energy` and `intensity_1`, `intensity_2`,
                `intensity_...` depending on the dataset size.
        """
        if self.args["global_"]:
            # First column is the energy axis; all others become numbered
            # intensity columns.
            return df.rename(
                columns={
                    col: ColumnNamesAPI().energy
                    if i == 0
                    else f"{ColumnNamesAPI().intensity}_{i}"
                    for i, col in enumerate(df.columns)
                }
            )
        return df.rename(
            columns={
                df.columns[0]: ColumnNamesAPI().energy,
                df.columns[1]: ColumnNamesAPI().intensity,
            }
        )

    def make_insight_report(self) -> None:
        """Make an insight-report of the fit statistic.

        !!! note "About Insight Report"

            The insight report based on:

            1. Configurations
            2. Statistics
            3. Variables
            4. Error-bars
            5. Correlations
            6. Covariance Matrix
            7. _Optional_: Confidence Interval

            All of the above are included in the report as dictionary in `args`.
        """
        self.args["fit_insights"] = fit_report_as_dict(
            self.result, modelpars=self.result.params
        )
        if self.args["conf_interval"]:
            try:
                self.args["confidence_interval"] = conf_interval(
                    self.minimizer, self.result, **self.args["conf_interval"]
                )
            except (MinimizerException, ValueError, KeyError) as exc:
                # Best-effort: the fit result is still usable without a
                # confidence interval, so only warn and continue.
                print(f"Error: {exc} -> No confidence interval could be calculated!")
                self.args["confidence_interval"] = {}

    def make_residual_fit(self) -> None:
        r"""Make the residuals of the model and the fit.

        !!! note "About Residual and Fit"

            The residual is calculated by the difference of the best fit `model`
            and the reference `data`. In case of a global fitting, the residuals
            are calculated for each `spectra` separately plus an averaged global
            residual.

            $$
            \mathrm{residual} = \mathrm{model} - \mathrm{data}
            $$

            $$
            \mathrm{residual}_{i} = \mathrm{model}_{i} - \mathrm{data}_{i}
            $$

            $$
            \mathrm{residual}_{avg} = \frac{ \sum_{i}
            \mathrm{model}_{i} - \mathrm{data}_{i}}{i}
            $$

            The fit is defined by the sum of the residual and the reference
            data. In case of a global fitting, the fits are calculated for each
            `spectra` separately.
        """
        df_copy: pd.DataFrame = self.df.copy()
        if self.args["global_"]:
            # The global residual comes back flattened; reshape it to
            # (n_points, n_spectra) and transpose so iteration yields one
            # residual vector per spectrum.
            residual = self.result.residual.reshape((-1, self.data_size)).T
            for i, _residual in enumerate(residual, start=1):
                df_copy[f"{ColumnNamesAPI().residual}_{i}"] = _residual
                df_copy[f"{ColumnNamesAPI().fit}_{i}"] = (
                    self.df[f"{ColumnNamesAPI().intensity}_{i}"].to_numpy() + _residual
                )
            df_copy[f"{ColumnNamesAPI().residual}_avg"] = np.mean(residual, axis=0)
        else:
            residual = self.result.residual
            df_copy[ColumnNamesAPI().residual] = residual
            df_copy[ColumnNamesAPI().fit] = (
                self.df[ColumnNamesAPI().intensity].to_numpy() + residual
            )
        self.df = df_copy

    def make_fit_contributions(self) -> None:
        """Make the fit contributions of the best fit model.

        !!! info "About Fit Contributions"

            The fit contributions are made independently of the local or global
            fitting.
        """
        self.df = calculated_model(
            params=self.result.params,
            x=self.df.iloc[:, 0].to_numpy(),
            df=self.df,
            global_fit=self.args["global_"],
        )

    def export_correlation2args(self) -> None:
        """Export the correlation matrix to the input file arguments.

        !!! note "About Correlation Matrix"

            The linear correlation matrix is calculated from and for the pandas
            dataframe and divided into two parts:

            1. Linear correlation matrix
            2. Non-linear correlation matrix (coming later ...)

        !!! note "About reading the correlation matrix"

            The correlation matrix is stored in the `args` as a dictionary with
            the following keys:

            * `index`
            * `columns`
            * `data`

            For re-reading the data, it is important to use the following code:

            >>> import pandas as pd
            >>> pd.DataFrame(**args["linear_correlation"])

            Important is to use the generator function for access the three keys
            and their values.
        """
        self.args["linear_correlation"] = self.df.corr().to_dict(orient="split")

    def export_results2args(self) -> None:
        """Export the results of the fit to the input file arguments."""
        self.args["fit_result"] = self.df.to_dict(orient="split")

    def export_regression_metrics2args(self) -> None:
        """Export the regression metrics of the fit to the input file arguments.

        !!! note "About Regression Metrics"

            The regression metrics are calculated via the project's
            `RegressionMetrics` helper.
        """
        self.args["regression_metrics"] = RegressionMetrics(self.df)()

    # NOTE(review): the method name keeps its historical typo ("desprective",
    # meaning "descriptive") because renaming it would break external callers.
    def export_desprective_statistic2args(self) -> None:
        """Export the descriptive statistic of the spectra, fit, and contributions."""
        self.args["descriptive_statistic"] = self.df.describe(
            percentiles=np.arange(0.1, 1, 0.1)
        ).to_dict(orient="split")
class SaveResult:
    """Saving the result of the fitting process.

    !!! note "About SaveResult"

        The results of the optimization are saved in two formats:

        1. JSON (default) for all results and meta data of the fitting
           process.
        2. CSV for the results of the optimization process, split into:

            * `<outfile>_fit.csv`: spectra, best fit, residuum, and the
              single model contributions.
            * `<outfile>_correlation.csv`: the linear correlation analysis.
            * `<outfile>_errors.csv`: the error analysis of the fit
              variables (initial, current, and best model values and their
              relative residuals).
    """

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize SaveResult class.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and
                `data`), as well as the best fit and the corresponding
                residuum. Hence, it will be extended by the single
                contribution of the model.
            args (Dict[str, Any]): The input file arguments as a dictionary
                with additional information beyond the command line arguments.
        """
        self.df = df
        self.args = args

    def __call__(self) -> None:
        """Call the SaveResult class: write the JSON summary and CSV files."""
        self.save_as_json()
        self.save_as_csv()

    def save_as_csv(self) -> None:
        """Save the fit results to csv files.

        !!! note "About saving the fit results"

            The fit results are saved to csv files and are divided into three
            different categories:

            1. The `results` of the optimization process.
            2. The `correlation analysis` of the optimization process.
            3. The `error analysis` of the optimization process.
        """
        # Fix: removed a leftover debug `print()` of the fit variables that
        # cluttered stdout on every save.
        self.df.to_csv(Path(f"{self.args['outfile']}_fit.csv"), index=False)
        # `linear_correlation` is stored in `split` orientation and can be
        # expanded straight into a DataFrame.
        pd.DataFrame(**self.args["linear_correlation"]).to_csv(
            Path(f"{self.args['outfile']}_correlation.csv"),
            index=True,
            index_label="attributes",
        )
        pd.DataFrame.from_dict(self.args["fit_insights"]["variables"]).to_csv(
            Path(f"{self.args['outfile']}_errors.csv"),
            index=True,
            index_label="attributes",
        )

    def save_as_json(self) -> None:
        """Save the fitting result as json file.

        Raises:
            FileNotFoundError: If no output file is provided in the
                arguments.
        """
        if self.args["outfile"]:
            with open(
                Path(f"{self.args['outfile']}_summary.json"), "w", encoding="utf-8"
            ) as f:
                json.dump(self.args, f, indent=4)
        else:
            raise FileNotFoundError("No output file provided!")
def read_input_file(fname: Path) -> MutableMapping[str, Any]:
    """Read the input file.

    Read the input file as `toml`, `json`, or `yaml` files and return as a
    dictionary.

    Args:
        fname (str): Name of the input file.

    Raises:
        OSError: If the input file is not supported.

    Returns:
        dict: Return the input file arguments as a dictionary with additional
            information beyond the command line arguments.
    """
    fname = Path(fname)
    suffix = fname.suffix
    if suffix == ".toml":
        # TOML must be read in binary mode (tomli requirement).
        with open(fname, "rb") as f:
            return tomli.load(f)
    if suffix == ".json":
        with open(fname, encoding="utf-8") as f:
            return json.load(f)
    if suffix in {".yaml", ".yml"}:
        with open(fname, encoding="utf-8") as f:
            return yaml.load(f, Loader=yaml.FullLoader)
    raise OSError(
        f"ERROR: Input file {fname} has not supported file format.\n"
        "Supported fileformats are: '*.json', '*.yaml', and '*.toml'"
    )
def load_data(args: Dict[str, str]) -> pd.DataFrame:
    """Load the data from a txt file.

    !!! note "About the data format"

        Load data from a txt file, which can be an ASCII file as txt, csv, or
        user-specific but rational file. The file can be separated by a
        delimiter. In case of 2d data, the columns has to be defined. In case
        of 3D data, all columns are considered as data.

    Args:
        args (Dict[str,str]): The input file arguments as a dictionary with
            additional information beyond the command line arguments.

    Returns:
        pd.DataFrame: DataFrame containing the input data (`x` and `data`),
            as well as the best fit and the corresponding residuum. Hence, it
            will be extended by the single contribution of the model.
    """
    read_kwargs: Dict[str, Any] = {
        "sep": args["separator"],
        "header": args["header"],
        "dtype": np.float64,
        "decimal": args["decimal"],
        "comment": args["comment"],
    }
    # Global fitting consumes every column; otherwise restrict to the
    # explicitly selected columns.
    if not args["global_"]:
        read_kwargs["usecols"] = args["column"]
    try:
        return pd.read_csv(args["infile"], **read_kwargs)
    except ValueError as exc:
        print(f"Error: {exc} -> Dataframe contains non numeric data!")
        sys.exit(1)
def check_keywords_consistency(
    check_args: MutableMapping[str, Any], ref_args: Dict[str, Any]
) -> None:
    """Check if the keywords are consistent.

    Check if the keywords are consistent between two dictionaries. The two
    dictionaries are reference keywords of the `cmd_line_args` and the `args`
    of the `input_file`.

    Args:
        check_args (MutableMapping[str, Any]): First dictionary to be checked.
        ref_args (Dict[str,Any]): Second dictionary to be checked.

    Raises:
        KeyError: If the keywords are not consistent.
    """
    for candidate in check_args:
        if candidate in ref_args:
            continue
        # Fail fast on the first unknown keyword.
        raise KeyError(f"ERROR: The {candidate} is not parameter of the `cmd-input`!")
def unicode_check(f: Any, encoding: str = "latin1") -> Any:
    """Check if the pkl file is encoded in unicode.

    Args:
        f (Any): The pkl file to load.
        encoding (str, optional): The encoding to use. Defaults to "latin1".

    Returns:
        Any: The pkl file, which can be a nested dictionary containing raw
            data, metadata, and other information.
    """
    try:
        return pickle.load(f)
    except UnicodeDecodeError:  # pragma: no cover
        # Legacy Python-2 pickles need an explicit byte-string encoding.
        return pickle.load(f, encoding=encoding)
def pkl2any(pkl_fname: Path, encoding: str = "latin1") -> Any:
    """Load a pkl file and return the data as a any type of data or object.

    Args:
        pkl_fname (Path): The pkl file to load.
        encoding (str, optional): The encoding to use. Defaults to "latin1".

    Raises:
        ValueError: If the file format is not supported.

    Returns:
        Any: Data or objects, which can contain various data types supported
            by pickle.
    """
    suffix = pkl_fname.suffix
    if suffix == ".gz":
        opener = gzip.open
    elif suffix == ".pkl":
        opener = open
    else:
        choices = [".pkl", ".pkl.gz"]
        raise ValueError(
            f"File format '{pkl_fname.suffix}' is not supported. "
            f"Supported file formats are: {choices}"
        )
    with opener(pkl_fname, "rb") as f:
        return unicode_check(f, encoding=encoding)
def pure_fname(fname: Path) -> Path:
    """Return the filename without the suffix.

    Pure filename without the suffix is implemented to avoid the problem with
    multiple dots in the filename like `test.pkl.gz` or `test.tar.gz`. The
    `stem` attribute of the `Path` class removes only the last suffix, so
    `test.pkl.gz` would become `test.pkl` and not `test`. This function
    iteratively strips suffixes until none remain.

    Args:
        fname (Path): The filename to be processed.

    Returns:
        Path: The filename without the suffix.
    """
    stripped = fname.parent / fname.stem
    while stripped.suffix:
        stripped = stripped.parent / stripped.stem
    return stripped
<|code_end|>
|
[Docs]: Using builtin release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "0.16.3"
<|code_end|>
spectrafit/api/models_model.py
<|code_start|>"""Reference model for the API of the models distributions."""
from typing import Callable
from typing import List
from typing import Optional
from pydantic import BaseModel
from pydantic import Field
__description__ = "Lmfit expression for explicit dependencies."
class AmplitudeAPI(BaseModel):
    """Definition of the amplitude of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum amplitude.")
    # Fix: `min` was typed Optional[int], which is inconsistent with the float
    # `max` and lets pydantic truncate fractional lower bounds (0.5 -> 0).
    min: Optional[float] = Field(default=None, description="Minimum amplitude.")
    vary: bool = Field(default=True, description="Vary the amplitude.")
    value: Optional[float] = Field(default=None, description="Initial Amplitude value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class CenterAPI(BaseModel):
    """Definition of the center of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum center.")
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(default=None, description="Minimum center.")
    vary: bool = Field(default=True, description="Vary the center.")
    value: Optional[float] = Field(default=None, description="Initial Center value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class FwhmgAPI(BaseModel):
    """Definition of the FWHM Gaussian of the models distributions."""

    max: Optional[float] = Field(
        default=None,
        description="Maximum Full Width Half Maximum of the Gaussian Distribution.",
    )
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(
        default=None,
        description="Minimum Full Width Half Maximum of the Gaussian Distribution.",
    )
    vary: bool = Field(
        default=True,
        description="Vary the Full Width Half Maximum of the Gaussian Distribution.",
    )
    value: Optional[float] = Field(
        default=None,
        description="Initial Full Width Half Maximum of "
        "the Gaussian Distribution value.",
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class FwhmlAPI(BaseModel):
    """Definition of the FWHM Lorentzian of the models distributions."""

    max: Optional[float] = Field(
        default=None,
        description="Maximum Full Width Half Maximum of the Lorentzian Distribution.",
    )
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(
        default=None,
        description="Minimum Full Width Half Maximum of the Lorentzian Distribution.",
    )
    vary: bool = Field(
        default=True,
        description="Vary the Full Width Half Maximum of the Lorentzian Distribution.",
    )
    value: Optional[float] = Field(
        default=None,
        description="Initial Full Width Half Maximum of "
        "the Lorentzian Distribution value.",
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class FwhmvAPI(BaseModel):
    """Definition of the FWHM Voigt of the models distributions."""

    max: Optional[float] = Field(
        default=None,
        description="Maximum Full Width Half Maximum of the Voigt Distribution.",
    )
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(
        default=None,
        description="Minimum Full Width Half Maximum of the Voigt Distribution.",
    )
    vary: bool = Field(
        default=True,
        description="Vary the Full Width Half Maximum of the Voigt Distribution.",
    )
    value: Optional[float] = Field(
        default=None,
        description="Initial Full Width Half Maximum of the Voigt Distribution value.",
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class GammaAPI(BaseModel):
    """Definition of the Gamma of the Voigt of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum gamma.")
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(default=None, description="Minimum gamma.")
    vary: bool = Field(default=True, description="Vary the gamma.")
    value: Optional[float] = Field(default=None, description="Initial Gamma value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class DecayAPI(BaseModel):
    """Definition of the Decay of the Exponential of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum decay rate.")
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(default=None, description="Minimum decay rate.")
    vary: bool = Field(default=True, description="Vary the decay rate.")
    value: Optional[float] = Field(
        default=None, description="Initial decay rate value."
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class InterceptAPI(BaseModel):
    """Definition of the Intercept of the Linear of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum intercept.")
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(default=None, description="Minimum intercept.")
    vary: bool = Field(default=True, description="Vary the intercept.")
    value: Optional[float] = Field(default=None, description="Initial intercept value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class ExponentAPI(BaseModel):
    """Definition of the Exponent of the Linear of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum exponent.")
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(default=None, description="Minimum exponent.")
    vary: bool = Field(default=True, description="Vary the exponent.")
    value: Optional[float] = Field(default=None, description="Initial exponent value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class SlopeAPI(BaseModel):
    """Definition of the Slope of the Linear of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum slope.")
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(default=None, description="Minimum slope.")
    vary: bool = Field(default=True, description="Vary the slope.")
    # Fix: corrected the "Inital" typo in the user-facing description.
    value: Optional[float] = Field(default=None, description="Initial slope value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class SigmaAPI(BaseModel):
    """Definition of the Sigma of the models distributions."""

    max: Optional[float] = Field(default=None, description="Maximum sigma.")
    # Fix: float lower bound (was Optional[int], truncating fractional values).
    min: Optional[float] = Field(default=None, description="Minimum sigma.")
    vary: bool = Field(default=True, description="Vary the sigma.")
    value: Optional[float] = Field(default=None, description="Initial sigma value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class PseudovoigtAPI(BaseModel):
    """Definition of the Pseudovoigt of the models distributions."""

    # Superposition of a Gaussian and a Lorentzian line shape; both FWHM
    # values are handled independently (see DistributionModels.pseudovoigt).
    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    fwhmg: FwhmgAPI = FwhmgAPI()
    fwhml: FwhmlAPI = FwhmlAPI()
class GaussianAPI(BaseModel):
    """Definition of the Gaussian of the models distributions."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    # NOTE(review): exposes `sigma` while `DistributionModels.gaussian` takes
    # `fwhmg` -- confirm how the two are mapped.
    sigma: SigmaAPI = SigmaAPI()
class LorentzianAPI(BaseModel):
    """Definition of the Lorentzian of the models distributions."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    # Matches the `fwhml` parameter of `DistributionModels.lorentzian`.
    fwhml: FwhmlAPI = FwhmlAPI()
class VoigtAPI(BaseModel):
    """Definition of the Voigt of the models distributions."""

    # No amplitude field: `DistributionModels.voigt` takes no amplitude and
    # evaluates the profile via the Faddeeva function.
    center: CenterAPI = CenterAPI()
    fwhmv: FwhmvAPI = FwhmvAPI()
    gamma: GammaAPI = GammaAPI()
class ExponentialAPI(BaseModel):
    """Definition of the Exponential of the models distributions."""

    # Parameters of `DistributionModels.exponential`:
    # amplitude * exp(-x / decay) + intercept.
    amplitude: AmplitudeAPI = AmplitudeAPI()
    decay: DecayAPI = DecayAPI()
    intercept: InterceptAPI = InterceptAPI()
class PowerAPI(BaseModel):
    """Definition of the Power of the models distributions."""

    # Parameters of `DistributionModels.power`:
    # amplitude * x**exponent + intercept.
    amplitude: AmplitudeAPI = AmplitudeAPI()
    exponent: ExponentAPI = ExponentAPI()
    intercept: InterceptAPI = InterceptAPI()
class LinearAPI(BaseModel):
    """Definition of the Linear of the models distributions."""

    # Parameters of `DistributionModels.linear`: slope * x + intercept.
    slope: SlopeAPI = SlopeAPI()
    intercept: InterceptAPI = InterceptAPI()
class ConstantAPI(BaseModel):
    """Definition of the Constant of the models distributions."""

    # Constant background offset; only the amplitude is fitted.
    amplitude: AmplitudeAPI = AmplitudeAPI()
class StepAPI(BaseModel):
    """Definition of the Step of the models distributions."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    sigma: SigmaAPI = SigmaAPI()
    # NOTE(review): `DistributionModels` implements the step variants `erf`,
    # `heaviside`, `atan`, and `log`; the default "step" names none of them --
    # confirm the intended default value.
    kind: str = Field(default="step", description="Kind of step function.")
class DistributionModelAPI(BaseModel):
    """Definition of the models distributions."""

    # One entry per supported distribution; each value carries the per-peak
    # parameter bounds and initial values.
    pseudovoigt: PseudovoigtAPI = PseudovoigtAPI()
    gaussian: GaussianAPI = GaussianAPI()
    lorentzian: LorentzianAPI = LorentzianAPI()
    voigt: VoigtAPI = VoigtAPI()
    # NOTE(review): `exponent` maps to ExponentAPI, which describes a single
    # parameter rather than a distribution; presumably ExponentialAPI was
    # intended -- confirm before relying on this field.
    exponent: ExponentAPI = ExponentAPI()
    power: PowerAPI = PowerAPI()
    linear: LinearAPI = LinearAPI()
    constant: ConstantAPI = ConstantAPI()
    step: StepAPI = StepAPI()
class ConfIntervalAPI(BaseModel):
    """Definition of Confidence Interval Function.

    Presumably mirrors the keyword arguments of `lmfit.conf_interval`
    (p_names, trace, maxiter, verbose, prob_func) -- confirm against the
    lmfit documentation.
    """

    p_names: Optional[List[str]] = Field(
        default=None, description="List of parameters names."
    )
    trace: bool = Field(
        default=True, description="Trace of the confidence interfall matrix."
    )
    # Iteration budget for profiling each parameter; bounded to (1, 2000].
    maxiter: int = Field(
        default=200,
        gt=1,
        le=2000,
        description="Maximum number of iteration",
    )
    verbose: bool = Field(
        default=False, description="Print information about the fit process."
    )
    prob_func: Optional[Callable[[float], float]] = Field(
        default=None, description="Probing function."
    )
<|code_end|>
spectrafit/models.py
<|code_start|>"""Minimization models for curve fitting."""
from collections import defaultdict
from dataclasses import dataclass
from math import log
from math import pi
from math import sqrt
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from lmfit import Minimizer
from lmfit import Parameters
from numpy.typing import NDArray
from scipy.signal import find_peaks
from scipy.special import erf
from scipy.special import wofz
from scipy.stats import hmean
from spectrafit.api.tools_model import AutopeakAPI
from spectrafit.api.tools_model import GlobalFittingAPI
from spectrafit.api.tools_model import SolverModelsAPI
class DistributionModels:
    """Analytical distribution models for the fit.

    !!! note "About distribution models"

        `DistributionModels` wraps the analytical line shapes used during the
        fit. Its purpose is to evaluate, from the best-fit parameters, every
        single contribution of the model; the superposition of the single
        contributions is the final model.

    !!! note "About the cumulative distributions"

        The cumulative variants (`c`-prefixed methods) are the integrals of
        the single contributions. They are not normalized, so their
        amplitudes are not directly comparable to the peak amplitudes, and
        they consistently take `fwhm` parameters instead of `sigma`.
    """

    def gaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional, area-normalized Gaussian distribution.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Gaussian
                distribution. Defaults to 1.0.
            center (float, optional): Center of the Gaussian distribution.
                Defaults to 0.0.
            fwhmg (float, optional): Full width at half maximum (FWHM) of the
                Gaussian distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Gaussian distribution of `x` given.
        """
        sig = fwhmg * Constants.fwhmg2sig
        prefactor = np.array(amplitude / (Constants.sq2pi * sig))
        return prefactor * np.exp(-((1.0 * x - center) ** 2) / (2 * sig**2))

    def lorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional Lorentzian (Cauchy) distribution.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            fwhml (float, optional): Full width at half maximum (FWHM) of the
                Lorentzian distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Lorentzian distribution of `x` given.
        """
        sig = fwhml * Constants.fwhml2sig
        shape = np.array(amplitude / (1 + ((1.0 * x - center) / sig) ** 2))
        return shape / (pi * sig)

    def voigt(
        self,
        x: NDArray[np.float64],
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: Optional[float] = None,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional Voigt profile via the Faddeeva function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float, optional): Center of the Voigt distribution.
                Defaults to 0.0.
            fwhmv (float, optional): Full width at half maximum (FWHM) of the
                Voigt distribution. Defaults to 1.0.
            gamma (float, optional): Scaling factor of the complex part of
                the Faddeeva function; falls back to the Gaussian sigma when
                None. Defaults to None.

        Returns:
            NDArray[np.float64]: Voigt distribution of `x` given.
        """
        sig = fwhmv * Constants.fwhmv2sig
        scale = sig if gamma is None else gamma
        z = (x - center + 1j * scale) / (sig * Constants.sq2)
        return np.array(wofz(z).real / (sig * Constants.sq2pi))

    def pseudovoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional Pseudo-Voigt distribution.

        !!! note "See also:"

            J. Appl. Cryst. (2000). 33, 1311-1316
            https://doi.org/10.1107/S0021889800010219

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            fwhmg (float, optional): FWHM of the Gaussian part. Defaults to
                1.0.
            fwhml (float, optional): FWHM of the Lorentzian part. Defaults to
                1.0.

        Returns:
            NDArray[np.float64]: Pseudo-Voigt distribution of `x` given.
        """
        # Effective total FWHM (Thompson-Cox-Hastings polynomial mixing).
        total_fwhm = np.power(
            fwhmg**5
            + 2.69269 * fwhmg**4 * fwhml
            + 2.42843 * fwhmg**3 * fwhml**2
            + 4.47163 * fwhmg**2 * fwhml**3
            + 0.07842 * fwhmg * fwhml**4
            + fwhml**5,
            0.2,
        )
        ratio = fwhml / total_fwhm
        eta = 1.36603 * ratio - 0.47719 * ratio**2 + 0.11116 * ratio**3
        return np.array(
            eta * self.lorentzian(x=x, amplitude=amplitude, center=center, fwhml=fwhml)
            + (1 - eta)
            * self.gaussian(x=x, amplitude=amplitude, center=center, fwhmg=fwhmg)
        )

    def exponential(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        decay: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional exponential decay.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            decay (float, optional): Decay constant. Defaults to 1.0.
            intercept (float, optional): Intercept. Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Exponential decay of `x` given.
        """
        decayed = np.exp(-x / decay)
        return np.array(amplitude * decayed + intercept)

    def power(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        exponent: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional power function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            exponent (float, optional): Exponent. Defaults to 1.0.
            intercept (float, optional): Intercept. Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Power function of `x` given.
        """
        powered = np.power(x, exponent)
        return np.array(amplitude * powered + intercept)

    def linear(
        self,
        x: NDArray[np.float64],
        slope: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional linear function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            slope (float, optional): Slope. Defaults to 1.0.
            intercept (float, optional): Intercept. Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Linear function of `x` given.
        """
        return np.array(intercept + slope * x)

    def constant(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional constant value.

        Args:
            x (NDArray[np.float64]): `x`-values of the data (used only for
                its length).
            amplitude (float, optional): Constant value. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Constant value of `x` given.
        """
        size = len(x)
        return np.array(np.linspace(amplitude, amplitude, size))

    @staticmethod
    def _norm(
        x: NDArray[np.float64], center: float, sigma: float
    ) -> NDArray[np.float64]:
        """Shift `x` by `center` and scale by `sigma` for the step functions.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float): Center of the step function.
            sigma (float): Sigma of the step function.

        Returns:
            NDArray[np.float64]: Normalized data.
        """
        # Guard against division by (near) zero.
        sigma = 1.0e-13 if abs(sigma) < 1.0e-13 else sigma
        return np.subtract(x, center) / sigma

    def erf(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional error-function step.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            sigma (float, optional): Sigma. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Error function of `x` given.
        """
        normed = self._norm(x, center, sigma)
        return np.array(amplitude * 0.5 * (1 + erf(normed)))

    def heaviside(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional Heaviside step function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            sigma (float, optional): Sigma. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Heaviside step function of `x` given.
        """
        normed = self._norm(x, center, sigma)
        return np.array(amplitude * 0.5 * (1 + np.sign(normed)))

    def atan(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional arctan step function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            sigma (float, optional): Sigma. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Arctan step function of `x` given.
        """
        normed = self._norm(x, center, sigma)
        return np.array(amplitude * 0.5 * (1 + np.arctan(normed) / pi))

    def log(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional logarithmic step function.

        NOTE(review): the original docstring showed a logistic sigmoid, but
        the implementation applies `np.log` to the normalized argument,
        which yields NaN for `x < center`; behavior is preserved here --
        confirm the intended shape.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            sigma (float, optional): Sigma. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Logarithmic step function of `x` given.
        """
        normed = self._norm(x, center, sigma)
        return np.array(amplitude * 0.5 * (1 + np.log(normed) / pi))

    def cgaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional cumulative Gaussian function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            fwhmg (float, optional): FWHM of the Gaussian. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Gaussian function of `x` given.
        """
        sig = fwhmg * Constants.fwhmg2sig
        return np.array(
            amplitude * 0.5 * (1 + erf((x - center) / (sig * np.sqrt(2.0))))
        )

    def clorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional cumulative Lorentzian function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            fwhml (float, optional): FWHM of the Lorentzian. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Lorentzian function of `x` given.
        """
        sig = fwhml * Constants.fwhml2sig
        return np.array(amplitude * (np.arctan((x - center) / sig) / pi) + 0.5)

    def cvoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional cumulative Voigt function.

        NOTE(review): implemented as a cumulative Gaussian damped by a
        Gaussian in `gamma` -- an approximation rather than the exact
        integral of the Voigt profile; behavior is preserved.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude. Defaults to 1.0.
            center (float, optional): Center. Defaults to 0.0.
            fwhmv (float, optional): FWHM of the Voigt. Defaults to 1.0.
            gamma (float, optional): Gamma of the Voigt. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Voigt function of `x` given.
        """
        sig = fwhmv * Constants.fwhmv2sig
        return np.array(
            amplitude
            * 0.5
            * (1 + erf((x - center) / (sig * np.sqrt(2.0))))
            * np.exp(-(((x - center) / gamma) ** 2))
        )
@dataclass(frozen=True)
class ReferenceKeys:
    """Reference keys for model fitting and peak detection."""

    # Every public callable on DistributionModels is a legal model keyword.
    __models__ = [
        func
        for func in dir(DistributionModels)
        if callable(getattr(DistributionModels, func)) and not func.startswith("_")
    ]
    # Peak shapes the automatic peak detection may assign.
    __automodels__ = [
        "gaussian",
        "lorentzian",
        "voigt",
        "pseudovoigt",
    ]

    def model_check(self, model: str) -> None:
        """Check if model is available.

        Args:
            model (str): Model name, optionally suffixed (e.g. `gaussian_1`);
                only the part before the first underscore is checked.

        Raises:
            NotImplementedError: If the model is not supported.
                (Fix: the docstring previously claimed `KeyError`, but the
                implementation raises `NotImplementedError`.)
        """
        if model.split("_")[0] not in self.__models__:
            raise NotImplementedError(f"{model} is not supported!")

    def automodel_check(self, model: str) -> None:
        """Check if model is available.

        Args:
            model (str): Auto Model name (gaussian, lorentzian, voigt, or
                pseudovoigt).

        Raises:
            KeyError: If the model is not supported.
        """
        if model not in self.__automodels__:
            raise KeyError(f"{model} is not supported!")

    def detection_check(self, args: Dict[str, Any]) -> None:
        """Check if detection is available.

        Args:
            args (Dict[str, Any]): The input file arguments as a dictionary
                with additional information beyond the command line arguments.

        Raises:
            ValidationError: Presumably a `pydantic.ValidationError` if a key
                is not a parameter of the `scipy.signal.find_peaks` function;
                validated by `AutopeakAPI` in `spectrafit.api`.
        """
        AutopeakAPI(**args)
@dataclass(frozen=True)
class Constants:
    r"""Mathematical constants for the curve models.

    !!! info "Constants"

        * `ln2`: natural logarithm of 2, $\log{2}$.
        * `sq2pi`: $\sqrt{2\pi}$.
        * `sqpi`: $\sqrt{\pi}$.
        * `sq2`: $\sqrt{2}$.
        * `fwhmg2sig`: FWHM-to-sigma factor for the Gaussian,
          $1 / (2\sqrt{2\ln 2})$.
        * `fwhml2sig`: FWHM-to-sigma factor for the Lorentzian, $1/2$.
        * `fwhmv2sig`: FWHM-to-sigma factor for the Voigt according to
          Olivero and Longbothum[^1] (see also the
          [XPSLibary website](https://xpslibrary.com/voigt-peak-shape/)),
          assuming equal Gaussian and Lorentzian widths:
          $fwhm_{\text{Voigt}} \approx 1.0692 +
          2\sqrt{0.2166 + 2\ln{2}} \cdot \sigma$.

    [^1]:
        J.J. Olivero, R.L. Longbothum,
        _Empirical fits to the Voigt line width: A brief review_,
        **Journal of Quantitative Spectroscopy and Radiative Transfer**,
        Volume 17, Issue 2, 1977, Pages 233-236, ISSN 0022-4073,
        https://doi.org/10.1016/0022-4073(77)90161-3.
    """

    ln2 = log(2.0)
    sq2pi = sqrt(2.0 * pi)
    sqpi = sqrt(pi)
    sq2 = sqrt(2.0)
    fwhmg2sig = 1 / (2.0 * sqrt(2.0 * log(2.0)))
    fwhml2sig = 1 / 2.0
    fwhmv2sig = 1 / (2 * 0.5346 + 2 * sqrt(0.2166 + log(2) * 2))
class AutoPeakDetection:
    """Automatic detection of peaks in a spectrum."""

    def __init__(
        self,
        x: NDArray[np.float64],
        data: NDArray[np.float64],
        args: Dict[str, Any],
    ) -> None:
        """Initialize the AutoPeakDetection class.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 1d-array.
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.x = x
        self.data = data
        # Only the `autopeak` section is used: either a bool (estimate all
        # defaults) or a dict of `scipy.signal.find_peaks` keyword arguments.
        self._args = args["autopeak"]

    @staticmethod
    def check_key_exists(
        key: str, args: Dict[str, Any], value: Union[float, Tuple[Any, Any]]
    ) -> Any:
        """Check if a key exists in a dictionary.

        Please check for the reference key also [scipy.signal.find_peaks][1].

        [1]:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html

        Args:
            key (str): Reference key of `scipy.signal.find_peaks`.
            args (Dict[str, Any]): Reference values of `scipy.signal.find_peaks`, if not
                defined will be set to estimated default values.
            value (Union[float, Tuple[float,float]]): Default value for the reference
                key.

        Returns:
            Any: The reference value for `scipy.signal.find_peaks`.
        """
        return args.get(key, value)

    @property
    def estimate_height(self) -> Tuple[float, float]:
        r"""Estimate the initial height based on an inverse noise ratio of a signal.

        !!! info "About the estimation of the height"

            The lower end of the height is the inverse noise ratio of the `data`, and
            upper limit is the maximum value of the `data`. The noise ratio of the
            `data` is based on the original implementation by `SciPy`:

            ```python
            def signaltonoise(a, axis=0, ddof=0):
                a = np.asanyarray(a)
                m = a.mean(axis)
                sd = a.std(axis=axis, ddof=ddof)
                return np.where(sd == 0, 0, m / sd)
            ```

        Returns:
            Tuple[float, float]: Tuple of the inverse signal to noise ratio and
                the maximum value of the `data`.
        """
        # NOTE(review): computes `1 - mean/std`, the *inverse* of the classic
        # `mean/std` signal-to-noise ratio quoted above — confirm intended.
        return 1 - self.data.mean() / self.data.std(), self.data.max()

    @property
    def estimate_threshold(self) -> Tuple[float, float]:
        """Estimate the threshold value for the peak detection.

        Returns:
            Tuple[float, float]: Minimum and maximum value of the spectrum `data`,
                respectively, `intensity`.
        """
        return self.data.min(), self.data.max()

    @property
    def estimate_distance(self) -> float:
        """Estimate the initial distance between peaks.

        Returns:
            float: Estimated distance between peaks.
        """
        # `scipy.signal.find_peaks` requires `distance >= 1`, hence the clamp.
        min_step = np.diff(self.x).min()
        return max(min_step, 1.0)

    @property
    def estimate_prominence(self) -> Tuple[float, float]:
        """Estimate the prominence of a peak.

        !!! info "About the estimation of the prominence"

            The prominence is the difference between the height of the peak and the
            bottom. To get a estimate of the prominence, the height of the peak is
            calculated by maximum value of the `data` and the bottom is calculated by
            the harmonic mean of the `data`.

        Returns:
            Tuple[float, float]: Tuple of the harmonic-mean and maximum value of `data`.
        """
        # `hmean` raises ValueError for non-positive data; fall back to the
        # arithmetic mean in that case.
        try:
            return hmean(self.data), self.data.max()
        except ValueError as exc:
            print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
            return self.data.mean(), self.data.max()

    @property
    def estimated_width(self) -> Tuple[float, float]:
        """Estimate the width of a peak.

        !!! info "About the estimation of the width"

            The width of a peak is estimated for a lower and an upper end. For the lower
            end, the minimum stepsize is used. For the upper end, the stepsize between
            the half maximum and the minimum value of the `data` is used as the width.

        Returns:
            Tuple[float, float]: Estimated width lower and upper end of the peaks.
        """
        return (
            np.diff(self.x).min(),
            np.abs(self.x[self.data.argmax()] - self.x[self.data.argmin()]) / 2,
        )

    @property
    def estimated_rel_height(self) -> float:
        """Estimate the relative height of a peak.

        !!! info "About the estimation of the relative height"

            The relative height of a peak is approximated by the difference of the
            harmonic mean value of the `data` and the minimum value of the `data`
            divided by the factor of `4`. In case of negative ratios, the value will be
            set to `Zero`.

        Returns:
            float: Estimated relative height of a peak.
        """
        try:
            rel_height = (hmean(self.data) - self.data.min()) / 4
        except ValueError as exc:
            print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
            rel_height = (self.data.mean() - self.data.min()) / 4
        return rel_height if rel_height > 0 else 0.0

    @property
    def estimated_wlen(self) -> float:
        r"""Estimate the window length for the peak detection.

        !!! info "About the estimation of the window length"

            The window length is the length of the window for the peak detection is
            defined to be 1% of the length of the `data`, consequently the len of the
            `data` is divided by 100. In case of a window length smaller than 1, the
            window length will be set to numerical value of 1, which is defined by
            `1 + 1e-9`.

        Returns:
            float: Estimated window length is set to the numeric value of > 1.
        """
        wlen = self.data.size / 100
        return wlen if wlen > 1.0 else 1 + 1e-9

    @property
    def estimated_plateau_size(self) -> Tuple[float, float]:
        """Estimate the plateau size for the peak detection.

        Returns:
            Tuple[float, float]: Estimated plateau size is set to `zero` for the lower
                end and the maximum value of the `x` for the upper end.
        """
        # NOTE(review): this property is currently unused — both
        # `initialize_peak_detection` and `default_values` use a plain 0
        # for `plateau_size` instead. Confirm which behavior is intended.
        return 0.0, self.x.max()

    def initialize_peak_detection(self) -> None:
        """Initialize the peak detection.

        !!! note "Initialize the peak detection"

            This method is used to initialize the peak detection. The initialization
            can be activated by setting the `initialize` attribute to `True`, which
            will automatically estimate the default parameters for the peak
            detection. In case of the `initialize` attribute is defined as a
            dictionary, the proposed values are taken from the dictionary if the
            corresponding key is present; otherwise the estimated default values
            are used.

        Raises:
            TypeError: If the `initialize` attribute is not of type `bool` or `dict`.
        """
        if isinstance(self._args, bool):
            self.default_values()
        elif isinstance(self._args, dict):
            # Validate the user-supplied keys against `scipy.signal.find_peaks`
            # before mixing them with the estimated defaults.
            ReferenceKeys().detection_check(self._args)
            self.height = self.check_key_exists(
                key="height", args=self._args, value=self.estimate_height
            )
            self.threshold = self.check_key_exists(
                key="threshold", args=self._args, value=self.estimate_threshold
            )
            self.distance = self.check_key_exists(
                key="distance", args=self._args, value=self.estimate_distance
            )
            self.prominence = self.check_key_exists(
                key="prominence", args=self._args, value=self.estimate_prominence
            )
            self.width = self.check_key_exists(
                key="width", args=self._args, value=self.estimated_width
            )
            self.wlen = self.check_key_exists(
                key="wlen", args=self._args, value=self.estimated_wlen
            )
            self.rel_height = self.check_key_exists(
                key="rel_height", args=self._args, value=self.estimated_rel_height
            )
            self.plateau_size = self.check_key_exists(
                key="plateau_size", args=self._args, value=0.0
            )
        else:
            raise TypeError(
                f"The type of the `args` is not supported: {type(self._args)}"
            )

    def default_values(self) -> None:
        """Set the default values for the peak detection."""
        self.height = self.estimate_height
        self.threshold = self.estimate_threshold
        self.distance = self.estimate_distance
        self.prominence = self.estimate_prominence
        self.width = self.estimated_width
        self.wlen = self.estimated_wlen
        self.rel_height = self.estimated_rel_height
        self.plateau_size = 0

    def __autodetect__(self) -> Any:
        """Return peak positions and properties from `scipy.signal.find_peaks`."""
        return find_peaks(
            self.data,
            height=self.height,
            threshold=self.threshold,
            distance=self.distance,
            prominence=self.prominence,
            width=self.width,
            wlen=self.wlen,
            rel_height=self.rel_height,
            plateau_size=self.plateau_size,
        )
class ModelParameters(AutoPeakDetection):
    """Class to define the model parameters."""

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the model parameters.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]):
                Nested arguments dictionary for the model based on **one** or **two**
                `int` keys depending if global fitting parameters, will explicit
                defined or not.

        !!! note "About `args` for models"

            The `args` dictionary is used to define the model parameters. And the
            total nested dictionary structure is as follows:

            ```python
            args: Dict[str, Dict[int, Dict[str, Dict[str, Union[str, int, float]]]]]
            ```

        !!! info "About the fitting options"

            In general, there are two option for the fitting possible:

            1. `Classic fitting` or `local fitting`, where the parameters are
               defined for a 2D spectrum.
            2. `Global fitting`, where the parameters are defined for a 3D
               spectrum. Here, the parameters can be automatically defined for
               each column on the basis of the initial parameters or they can be
               completely defined by the user. The `global fitting` definition
               starts at `1` similar to the peaks attributes notation.
        """
        # Number of intensity columns (first column is the energy axis `x`).
        self.col_len = df.shape[1] - 1
        self.args = args
        self.params = Parameters()
        self.x, self.data = self.df_to_numvalues(df=df, args=args)
        super().__init__(self.x, self.data, self.args)

    def df_to_numvalues(
        self, df: pd.DataFrame, args: Dict[str, Any]
    ) -> Tuple[NDArray[np.float64], NDArray[np.float64]]:
        """Transform the dataframe to numeric values of `x` and `data`.

        !!! note "About the dataframe to numeric values"

            The dataframe is separated into the `x` and `data` columns depending
            on the `args` dictionary. In terms of global fitting, `data` contains
            the intensity values for every column except the energy column.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Returns:
            Tuple[NDArray[np.float64], NDArray[np.float64]]: Tuple of `x` and
                `data` as numpy arrays.
        """
        if args["global_"]:
            return (
                df[args["column"][0]].to_numpy(),
                df.loc[:, df.columns != args["column"][0]].to_numpy(),
            )
        return (df[args["column"][0]].to_numpy(), df[args["column"][1]].to_numpy())

    @property
    def return_params(self) -> Parameters:
        """Return the `class` representation of the model parameters.

        Returns:
            Parameters: Model parameters class.
        """
        self.__perform__()
        return self.params

    def __str__(self) -> str:
        """Return the `string` representation of the model parameters.

        Returns:
            str: String representation of the model parameters.
        """
        self.__perform__()
        return str(self.params)

    def __perform__(self) -> None:
        """Perform the model parameter definition.

        Raises:
            KeyError: Global fitting in combination with automatic peak detection
                is not implemented yet.
        """
        if self.args["global_"] == 0 and not self.args["autopeak"]:
            self.define_parameters()
        elif self.args["global_"] == 1 and not self.args["autopeak"]:
            self.define_parameters_global()
        elif self.args["global_"] == 2 and not self.args["autopeak"]:
            self.define_parameters_global_pre()
        elif self.args["global_"] == 0:
            self.initialize_peak_detection()
            self.define_parameters_auto()
        elif self.args["global_"] in [1, 2]:
            raise KeyError(
                "Global fitting mode with automatic peak detection "
                "is not supported yet."
            )

    def define_parameters_auto(self) -> None:
        """Auto define the model parameters for local fitting.

        Peak positions and properties come from `scipy.signal.find_peaks`
        (via `__autodetect__`). For every detected peak an amplitude, a center,
        and one or two width parameters (depending on the model type) are
        registered; the generated setup is recorded in
        ``self.args["auto_generated_models"]``.
        """
        positions, properties = self.__autodetect__()
        if (
            not isinstance(self.args["autopeak"], bool)
            and "model_type" in self.args["autopeak"]
        ):
            _model = self.args["autopeak"]["model_type"].lower()
            ReferenceKeys().automodel_check(model=_model)
            models = _model
        else:
            # Fall back to a Gaussian line shape if no model type is requested.
            models = "gaussian"
        # Width parameters per model: (parameter suffix, factor on the detected
        # width for the initial value, factor on the detected width for the
        # upper bound). This replaces four near-identical copy-paste branches.
        width_specs = {
            "gaussian": (("fwhmg", 1.0, 2.0),),
            "lorentzian": (("fwhml", 1.0, 2.0),),
            "voigt": (("fwhmv", 1.0, 2.0),),
            "pseudovoigt": (("fwhmg", 0.5, 1.0), ("fwhml", 0.5, 2.0)),
        }[models]
        for i, (_cent, _amp, _fhmw) in enumerate(
            zip(
                self.x[positions],
                properties["peak_heights"],
                properties["widths"],
            ),
            start=1,
        ):
            self.params.add(
                f"{models}_amplitude_{i}",
                value=_amp,
                min=-np.abs(1.25 * _amp),
                max=np.abs(1.25 * _amp),
                vary=True,
            )
            self.params.add(
                f"{models}_center_{i}",
                value=_cent,
                min=0.5 * _cent,
                max=2 * _cent,
                vary=True,
            )
            for suffix, value_factor, max_factor in width_specs:
                self.params.add(
                    f"{models}_{suffix}_{i}",
                    value=value_factor * _fhmw,
                    min=0,
                    max=max_factor * _fhmw,
                    vary=True,
                )
        # Keep a JSON-serializable record of the generated models for the report.
        self.args["auto_generated_models"] = {
            "models": {
                key: {
                    "value": self.params[key].value,
                    "min": self.params[key].min,
                    "max": self.params[key].max,
                    "vary": self.params[key].vary,
                }
                for key in self.params
            },
            "positions": positions.tolist(),
            "properties": {key: value.tolist() for key, value in properties.items()},
        }

    def define_parameters(self) -> None:
        """Define the input parameters for a `params`-dictionary for classic fitting."""
        for key_1, value_1 in self.args["peaks"].items():
            self.define_parameters_loop(key_1=key_1, value_1=value_1)

    def define_parameters_loop(self, key_1: str, value_1: Dict[str, Any]) -> None:
        """Loop through the input parameters for a `params`-dictionary.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            value_1 (Dict[str, Any]): The value of the first level of the input
                dictionary.
        """
        for key_2, value_2 in value_1.items():
            self.define_parameters_loop_2(key_1=key_1, key_2=key_2, value_2=value_2)

    def define_parameters_loop_2(
        self, key_1: str, key_2: str, value_2: Dict[str, Any]
    ) -> None:
        """Loop through the input parameters for a `params`-dictionary.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            value_2 (Dict[str, Any]): The value of the second level of the input
                dictionary.
        """
        for key_3, value_3 in value_2.items():
            self.define_parameters_loop_3(
                key_1=key_1, key_2=key_2, key_3=key_3, value_3=value_3
            )

    def define_parameters_loop_3(
        self, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
    ) -> None:
        """Loop through the input parameters for a `params`-dictionary.

        Args:
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            key_3 (str): The key of the third level of the input dictionary.
            value_3 (Dict[str, Any]): The value of the third level of the input
                dictionary.
        """
        # Parameter names follow the `<model>_<attribute>_<peak>` convention.
        self.params.add(f"{key_2}_{key_3}_{key_1}", **value_3)

    def define_parameters_global(self) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting."""
        for col_i in range(self.col_len):
            for key_1, value_1 in self.args["peaks"].items():
                for key_2, value_2 in value_1.items():
                    for key_3, value_3 in value_2.items():
                        self._define_parameter(
                            col_i=col_i,
                            key_1=key_1,
                            key_2=key_2,
                            key_3=key_3,
                            value_3=value_3,
                        )

    def _define_parameter(
        self, col_i: int, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
    ) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting.

        All non-amplitude attributes of columns > 1 are constrained (via `expr`)
        to the corresponding attribute of column 1, so only the amplitudes vary
        independently per spectrum.

        Args:
            col_i (int): The column index.
            key_1 (str): The key of the first level of the input dictionary.
            key_2 (str): The key of the second level of the input dictionary.
            key_3 (str): The key of the third level of the input dictionary.
            value_3 (Dict[str, Any]): The value of the third level of the input
                dictionary.
        """
        if col_i:
            if key_3 != "amplitude":
                self.params.add(
                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
                    expr=f"{key_2}_{key_3}_{key_1}_1",
                )
            else:
                self.params.add(
                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
                    **value_3,
                )
        else:
            self.params.add(f"{key_2}_{key_3}_{key_1}_1", **value_3)

    def define_parameters_global_pre(self) -> None:
        """Define the input parameters for a `params`-dictionary for global fitting.

        !!! warning "About `params` for global fitting"

            `define_parameters_global_pre` requires fully defined `params`-dictionary
            in the json, toml, or yaml file input. This means:

            1. Number of the spectra must be defined.
            2. Number of the peaks must be defined.
            3. Number of the parameters must be defined.
            4. The parameters must be defined.
        """
        for key_1, value_1 in self.args["peaks"].items():
            for key_2, value_2 in value_1.items():
                for key_3, value_3 in value_2.items():
                    for key_4, value_4 in value_3.items():
                        self.params.add(f"{key_3}_{key_4}_{key_2}_{key_1}", **value_4)
class SolverModels(ModelParameters):
    """Solving models for 2D and 3D data sets.

    !!! hint "Solver Modes"

        * `"2D"`: Solve 2D models via the classic `lmfit` function.
        * `"3D"`: Solve 3D models via global fit. For the `global-fitting`
          procedure, the `lmfit` function is used to solve the models with an
          extended set of parameters.
    """

    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the solver modes.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        super().__init__(df=df, args=args)
        self.args_solver = SolverModelsAPI(**args).dict()
        self.args_global = GlobalFittingAPI(**args).dict()
        self.params = self.return_params

    def __call__(self) -> Tuple[Minimizer, Any]:
        """Solve the fitting model.

        Returns:
            Tuple[Minimizer, Any]: Minimizer class and the fitting results.
        """
        # Global and local fits share the identical minimizer setup; they
        # differ only in the residual function handed to lmfit.
        residual_fn = (
            self.solve_global_fitting
            if self.args_global["global_"]
            else self.solve_local_fitting
        )
        minimizer = Minimizer(
            residual_fn,
            params=self.params,
            fcn_args=(self.x, self.data),
            **self.args_solver["minimizer"],
        )
        return (minimizer, minimizer.minimize(**self.args_solver["optimizer"]))

    @staticmethod
    def solve_local_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Build the local (2D) residual for the lmfit optimizer.

        Every parameter name follows the ``<model>_<attribute>_<peak>``
        convention. The parameters are regrouped per ``(model, peak)`` pair,
        each group is evaluated through the matching `DistributionModels`
        method, and the summed model is compared against the measured
        intensities. See `ReferenceKeys.__models__` / `DistributionModels`
        for the supported model names.

        Args:
            params (Dict[str, Parameters]): The best optimized parameters of the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 1d-array.

        Returns:
            NDArray[np.float64]: Residual between the proposed model and `data`.
        """
        grouped: Dict[Tuple[str, str], Parameters] = defaultdict(dict)
        for name in params:
            name = name.lower()
            ReferenceKeys().model_check(model=name)
            parts = name.split("_")
            grouped[(parts[0], parts[2])][parts[1]] = params[name]
        total = np.zeros(x.shape)
        for (model_name, _peak), kwargs in grouped.items():
            total += getattr(DistributionModels(), model_name)(x, **kwargs)
        return total - data

    @staticmethod
    def solve_global_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Build the global (3D) residual for the lmfit optimizer.

        Parameter names follow the ``<model>_<attribute>_<peak>_<column>``
        convention. The groups are evaluated per ``(model, peak, column)``
        triple and each contribution is accumulated into the spectrum column it
        belongs to. The `x`-values (energy) are shared by all columns, while
        every intensity column is fitted as one unit; the 2D residual is
        flattened because lmfit minimizes 1-dimensional arrays.

        Args:
            params (Dict[str, Parameters]): The best optimized parameters of the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 2D-array.

        Returns:
            NDArray[np.float64]: Flattened residual between model and `data`.
        """
        grouped: Dict[Tuple[str, str, str], Parameters] = defaultdict(dict)
        for name in params:
            name = name.lower()
            ReferenceKeys().model_check(model=name)
            parts = name.split("_")
            grouped[(parts[0], parts[2], parts[3])][parts[1]] = params[name]
        total = np.zeros(data.shape)
        for (model_name, _peak, column), kwargs in grouped.items():
            # Column suffixes are 1-based in the parameter names.
            total[:, int(column) - 1] += getattr(DistributionModels(), model_name)(
                x, **kwargs
            )
        return (total - data).flatten()
def calculated_model(
    params: Dict[str, Parameters],
    x: NDArray[np.float64],
    df: pd.DataFrame,
    global_fit: int,
) -> pd.DataFrame:
    r"""Calculate the single contributions of the models and add them to the dataframe.

    The best-fit parameters are regrouped by their name pattern
    ``<model>_<attribute>_<peak>`` (local fit) or
    ``<model>_<attribute>_<peak>_<column>`` (global fit). Each group is
    evaluated with the matching `DistributionModels` method and appended to a
    copy of `df` as its own column, so every single contribution of the total
    model becomes inspectable.

    Args:
        params (Dict[str, Parameters]): The best optimized parameters of the fit.
        x (NDArray[np.float64]): `x`-values of the data.
        df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
            as well as the best fit and the corresponding residuum. Hence, it
            will be extended by the single contribution of the model.
        global_fit (int): If 1 or 2, the model is calculated for the global fit.

    Returns:
        pd.DataFrame: Extended dataframe containing the single contributions of
            the models.
    """
    grouped: Dict[Any, Parameters] = defaultdict(dict)
    for name in params:
        name = name.lower()
        ReferenceKeys().model_check(model=name)
        parts = name.split("_")
        group_key = (
            (parts[0], parts[2], parts[3]) if global_fit else (parts[0], parts[2])
        )
        grouped[group_key][parts[1]] = params[name]
    extended = df.copy()
    for group_key, kwargs in grouped.items():
        # Column name mirrors the grouping key, e.g. "gaussian_1" or "gaussian_1_2".
        extended["_".join(group_key)] = getattr(DistributionModels(), group_key[0])(
            x, **kwargs
        )
    return extended
<|code_end|>
| spectrafit/__init__.py
"""SpectraFit, fast command line tool for fitting data."""
# Package version — single source of truth for the distribution metadata.
__version__ = "0.16.4"
spectrafit/api/models_model.py
<|code_start|>"""Reference model for the API of the models distributions."""
from typing import Callable
from typing import List
from typing import Optional
from pydantic import BaseModel
from pydantic import Field
__description__ = "Lmfit expression for explicit dependencies."
class AmplitudeAPI(BaseModel):
    """Definition of the amplitude of the models distributions."""

    # `min` is a float bound like `max`; an `int` annotation would let pydantic
    # truncate fractional lower bounds (e.g. 0.5 -> 0).
    max: Optional[float] = Field(default=None, description="Maximum amplitude.")
    min: Optional[float] = Field(default=None, description="Minimum amplitude.")
    vary: bool = Field(default=True, description="Vary the amplitude.")
    value: Optional[float] = Field(default=None, description="Initial Amplitude value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class CenterAPI(BaseModel):
    """Definition of the center of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(default=None, description="Maximum center.")
    min: Optional[float] = Field(default=None, description="Minimum center.")
    vary: bool = Field(default=True, description="Vary the center.")
    value: Optional[float] = Field(default=None, description="Initial Center value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class FwhmgAPI(BaseModel):
    """Definition of the FWHM Gaussian of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(
        default=None,
        description="Maximum Full Width Half Maximum of the Gaussian Distribution.",
    )
    min: Optional[float] = Field(
        default=None,
        description="Minimum Full Width Half Maximum of the Gaussian Distribution.",
    )
    vary: bool = Field(
        default=True,
        description="Vary the Full Width Half Maximum of the Gaussian Distribution.",
    )
    value: Optional[float] = Field(
        default=None,
        description="Initial Full Width Half Maximum of "
        "the Gaussian Distribution value.",
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class FwhmlAPI(BaseModel):
    """Definition of the FWHM Lorentzian of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(
        default=None,
        description="Maximum Full Width Half Maximum of the Lorentzian Distribution.",
    )
    min: Optional[float] = Field(
        default=None,
        description="Minimum Full Width Half Maximum of the Lorentzian Distribution.",
    )
    vary: bool = Field(
        default=True,
        description="Vary the Full Width Half Maximum of the Lorentzian Distribution.",
    )
    value: Optional[float] = Field(
        default=None,
        description="Initial Full Width Half Maximum of "
        "the Lorentzian Distribution value.",
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class FwhmvAPI(BaseModel):
    """Definition of the FWHM Voigt of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(
        default=None,
        description="Maximum Full Width Half Maximum of the Voigt Distribution.",
    )
    min: Optional[float] = Field(
        default=None,
        description="Minimum Full Width Half Maximum of the Voigt Distribution.",
    )
    vary: bool = Field(
        default=True,
        description="Vary the Full Width Half Maximum of the Voigt Distribution.",
    )
    value: Optional[float] = Field(
        default=None,
        description="Initial Full Width Half Maximum of the Voigt Distribution value.",
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class GammaAPI(BaseModel):
    """Definition of the Gamma of the Voigt of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(default=None, description="Maximum gamma.")
    min: Optional[float] = Field(default=None, description="Minimum gamma.")
    vary: bool = Field(default=True, description="Vary the gamma.")
    value: Optional[float] = Field(default=None, description="Initial Gamma value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class DecayAPI(BaseModel):
    """Definition of the Decay of the Exponential of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(default=None, description="Maximum decay rate.")
    min: Optional[float] = Field(default=None, description="Minimum decay rate.")
    vary: bool = Field(default=True, description="Vary the decay rate.")
    value: Optional[float] = Field(
        default=None, description="Initial decay rate value."
    )
    expr: Optional[str] = Field(default=None, description=__description__)
class InterceptAPI(BaseModel):
    """Definition of the Intercept of the Linear of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(default=None, description="Maximum intercept.")
    min: Optional[float] = Field(default=None, description="Minimum intercept.")
    vary: bool = Field(default=True, description="Vary the intercept.")
    value: Optional[float] = Field(default=None, description="Initial intercept value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class ExponentAPI(BaseModel):
    """Definition of the Exponent of the Linear of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(default=None, description="Maximum exponent.")
    min: Optional[float] = Field(default=None, description="Minimum exponent.")
    vary: bool = Field(default=True, description="Vary the exponent.")
    value: Optional[float] = Field(default=None, description="Initial exponent value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class SlopeAPI(BaseModel):
    """Definition of the Slope of the Linear of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(default=None, description="Maximum slope.")
    min: Optional[float] = Field(default=None, description="Minimum slope.")
    vary: bool = Field(default=True, description="Vary the slope.")
    # "Inital" typo fixed in the user-facing schema description.
    value: Optional[float] = Field(default=None, description="Initial slope value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class SigmaAPI(BaseModel):
    """Definition of the Sigma of the models distributions."""

    # `min` as float to match `max`; Optional[int] would truncate fractional bounds.
    max: Optional[float] = Field(default=None, description="Maximum sigma.")
    min: Optional[float] = Field(default=None, description="Minimum sigma.")
    vary: bool = Field(default=True, description="Vary the sigma.")
    value: Optional[float] = Field(default=None, description="Initial sigma value.")
    expr: Optional[str] = Field(default=None, description=__description__)
class PseudovoigtAPI(BaseModel):
    """Definition of the Pseudovoigt of the models distributions."""

    # Amplitude + center plus separate Gaussian and Lorentzian widths.
    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    fwhmg: FwhmgAPI = FwhmgAPI()
    fwhml: FwhmlAPI = FwhmlAPI()
class GaussianAPI(BaseModel):
    """Definition of the Gaussian of the models distributions."""

    # Peak described by amplitude, center, and Gaussian FWHM.
    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    fwhmg: FwhmgAPI = FwhmgAPI()
class LorentzianAPI(BaseModel):
    """Definition of the Lorentzian of the models distributions."""

    # Peak described by amplitude, center, and Lorentzian FWHM.
    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    fwhml: FwhmlAPI = FwhmlAPI()
class VoigtAPI(BaseModel):
    """Definition of the Voigt of the models distributions."""

    # NOTE(review): unlike the other peak models there is no amplitude field —
    # presumably the voigt distribution is parameterized by center/fwhmv/gamma
    # only; confirm against `DistributionModels.voigt`.
    center: CenterAPI = CenterAPI()
    fwhmv: FwhmvAPI = FwhmvAPI()
    gamma: GammaAPI = GammaAPI()
class ExponentialAPI(BaseModel):
    """Definition of the Exponential of the models distributions."""

    # Exponential model: amplitude, decay rate, and additive intercept.
    amplitude: AmplitudeAPI = AmplitudeAPI()
    decay: DecayAPI = DecayAPI()
    intercept: InterceptAPI = InterceptAPI()
class PowerAPI(BaseModel):
    """Power-law background: amplitude, exponent, and intercept."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    exponent: ExponentAPI = ExponentAPI()
    intercept: InterceptAPI = InterceptAPI()
class LinearAPI(BaseModel):
    """Linear background: slope and intercept."""

    slope: SlopeAPI = SlopeAPI()
    intercept: InterceptAPI = InterceptAPI()
class ConstantAPI(BaseModel):
    """Constant background: amplitude only."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
class ErfAPI(BaseModel):
    """Error-function (erf) step: amplitude, center, and sigma."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    sigma: SigmaAPI = SigmaAPI()
class HeavisideAPI(BaseModel):
    """Heaviside step: amplitude, center, and sigma."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    sigma: SigmaAPI = SigmaAPI()
class AtanAPI(BaseModel):
    """Arctangent step: amplitude, center, and sigma."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    sigma: SigmaAPI = SigmaAPI()
class LogAPI(BaseModel):
    """Logarithmic step: amplitude, center, and sigma."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    sigma: SigmaAPI = SigmaAPI()
class CGaussAPI(BaseModel):
    """Cumulative Gaussian: amplitude, center, and Gaussian FWHM."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    fwhmg: FwhmgAPI = FwhmgAPI()
class CLorentzAPI(BaseModel):
    """Cumulative Lorentzian: amplitude, center, and Lorentzian FWHM."""

    amplitude: AmplitudeAPI = AmplitudeAPI()
    center: CenterAPI = CenterAPI()
    fwhml: FwhmlAPI = FwhmlAPI()
class CVoigtAPI(BaseModel):
    """Cumulative Voigt: center, Voigt FWHM, and gamma.

    NOTE(review): the `cvoigt` distribution function accepts an `amplitude`
    argument, but this model declares none (matching `VoigtAPI`) — confirm
    whether `amplitude` should be configurable here.
    """

    center: CenterAPI = CenterAPI()
    fwhmv: FwhmvAPI = FwhmvAPI()
    gamma: GammaAPI = GammaAPI()
class DistributionModelAPI(BaseModel):
    """Registry of all supported model distributions.

    The attribute names define the set of valid model keywords; other code
    reads them via `DistributionModelAPI.schema()["properties"].keys()`.
    """

    gaussian: GaussianAPI = GaussianAPI()
    lorentzian: LorentzianAPI = LorentzianAPI()
    voigt: VoigtAPI = VoigtAPI()
    pseudovoigt: PseudovoigtAPI = PseudovoigtAPI()
    exponential: ExponentialAPI = ExponentialAPI()
    power: PowerAPI = PowerAPI()
    linear: LinearAPI = LinearAPI()
    constant: ConstantAPI = ConstantAPI()
    erf: ErfAPI = ErfAPI()
    heaviside: HeavisideAPI = HeavisideAPI()
    atan: AtanAPI = AtanAPI()
    log: LogAPI = LogAPI()
    cgauss: CGaussAPI = CGaussAPI()
    clorentz: CLorentzAPI = CLorentzAPI()
    cvoigt: CVoigtAPI = CVoigtAPI()
class ConfIntervalAPI(BaseModel):
    """Definition of the confidence-interval calculation settings."""

    # Parameter names to compute intervals for; None means all parameters.
    p_names: Optional[List[str]] = Field(
        default=None, description="List of parameters names."
    )
    trace: bool = Field(
        default=True, description="Trace of the confidence interfall matrix."
    )
    # Iteration cap is bounded (2..2000) to keep the search tractable.
    maxiter: int = Field(
        default=200,
        gt=1,
        le=2000,
        description="Maximum number of iteration",
    )
    verbose: bool = Field(
        default=False, description="Print information about the fit process."
    )
    # Optional probability function mapping a float to a float.
    prob_func: Optional[Callable[[float], float]] = Field(
        default=None, description="Probing function."
    )
<|code_end|>
spectrafit/models.py
<|code_start|>"""Minimization models for curve fitting."""
from collections import defaultdict
from dataclasses import dataclass
from math import log
from math import pi
from math import sqrt
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from lmfit import Minimizer
from lmfit import Parameters
from numpy.typing import NDArray
from scipy.signal import find_peaks
from scipy.special import erf
from scipy.special import wofz
from scipy.stats import hmean
from spectrafit.api.models_model import DistributionModelAPI
from spectrafit.api.tools_model import AutopeakAPI
from spectrafit.api.tools_model import GlobalFittingAPI
from spectrafit.api.tools_model import SolverModelsAPI
class DistributionModels:
    """Distribution models for the fit.

    !!! note "About distribution models"

        `DistributionModels` are wrapper functions for the distribution models. The
        overall goal is to extract from the best parameters the single contributions in
        the model. The superposition of the single contributions is the final model.

    !!! note "About the cumulative distribution"

        The cumulative distribution is the sum of the single contributions. The
        cumulative distribution is the model that is fitted to the data. In contrast to
        the single contributions, the cumulative distribution is not normalized and
        therefore the amplitude of the single contributions is not directly comparable
        to the amplitude of the cumulative distribution. Also, the cumulative
        distributions are consequently using the `fwhm` parameter instead of the
        `sigma` parameter.
    """

    def gaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Gaussian distribution.

        $$
        {\displaystyle g(x)={\frac {1}{\sigma {\sqrt {2\pi }}}}\exp
        ( -{\frac {1}{2}}{\frac {(x-\mu )^{2}}{\sigma ^{2}}} ) }
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Gaussian distribution.
                Defaults to 1.0.
            center (float, optional): Center of the Gaussian distribution.
                Defaults to 0.0.
            fwhmg (float, optional): Full width at half maximum (FWHM) of the Gaussian
                distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Gaussian distribution of `x` given.
        """
        # Convert FWHM to the Gaussian standard deviation before evaluating.
        sigma = fwhmg * Constants.fwhmg2sig
        return np.array(amplitude / (Constants.sq2pi * sigma)) * np.exp(
            -((1.0 * x - center) ** 2) / (2 * sigma**2)
        )

    def lorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Lorentzian distribution.

        $$
        f(x;x_{0},\gamma )={\frac {1}{\pi \gamma
        [ 1+ ( {\frac {x-x_{0}}{\gamma }})^{2} ]
        }} ={1 \over \pi \gamma } [ {\gamma ^{2} \over (x-x_{0})^{2}+\gamma ^{2}} ]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Lorentzian distribution.
                Defaults to 1.0.
            center (float, optional): Center of the Lorentzian distribution. Defaults to
                0.0.
            fwhml (float, optional): Full width at half maximum (FWHM) of the Lorentzian
                distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Lorentzian distribution of `x` given.
        """
        # Convert FWHM to the Lorentzian half-width (gamma).
        sigma = fwhml * Constants.fwhml2sig
        return np.array(amplitude / (1 + ((1.0 * x - center) / sigma) ** 2)) / (
            pi * sigma
        )

    def voigt(
        self,
        x: NDArray[np.float64],
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: Optional[float] = None,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Voigt distribution.

        $$
        {\displaystyle V(x;\sigma ,\gamma )\equiv
        \int_{-\infty }^{\infty }G(x';\sigma )
        L(x-x';\gamma )\,dx'}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float, optional): Center of the Voigt distribution. Defaults to 0.0.
            fwhmv (float, optional): Full width at half maximum (FWHM) of the Lorentzian
                distribution. Defaults to 1.0.
            gamma (float, optional): Scaling factor of the complex part of the
                [Faddeeva Function](https://en.wikipedia.org/wiki/Faddeeva_function).
                Defaults to None.

        Returns:
            NDArray[np.float64]: Voigt distribution of `x` given.
        """
        sigma = fwhmv * Constants.fwhmv2sig
        # When gamma is not given, the Lorentzian width defaults to sigma.
        if gamma is None:
            gamma = sigma
        # Evaluate the Voigt profile via the real part of the Faddeeva function.
        z = (x - center + 1j * gamma) / (sigma * Constants.sq2)
        return np.array(wofz(z).real / (sigma * Constants.sq2pi))

    def pseudovoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional Pseudo-Voigt distribution.

        !!! note "See also:"

            J. Appl. Cryst. (2000). 33, 1311-1316
            https://doi.org/10.1107/S0021889800010219

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Pseudo-Voigt distribution.
                Defaults to 1.0.
            center (float, optional): Center of the Pseudo-Voigt distribution.
                Defaults to 0.0.
            fwhmg (float, optional): Full width half maximum of the Gaussian
                distribution in the Pseudo-Voigt distribution. Defaults to 1.0.
            fwhml (float, optional): Full width half maximum of the Lorentzian
                distribution in the Pseudo-Voigt distribution. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Pseudo-Voigt distribution of `x` given.
        """
        # Effective FWHM from the polynomial approximation of the cited paper.
        f = np.power(
            fwhmg**5
            + 2.69269 * fwhmg**4 * fwhml
            + 2.42843 * fwhmg**3 * fwhml**2
            + 4.47163 * fwhmg**2 * fwhml**3
            + 0.07842 * fwhmg * fwhml**4
            + fwhml**5,
            0.2,
        )
        # Mixing parameter between the Lorentzian and Gaussian contributions.
        n = (
            1.36603 * (fwhml / f)
            - 0.47719 * (fwhml / f) ** 2
            + 0.11116 * (fwhml / f) ** 3
        )
        return np.array(
            n * self.lorentzian(x=x, amplitude=amplitude, center=center, fwhml=fwhml)
            + (1 - n)
            * self.gaussian(x=x, amplitude=amplitude, center=center, fwhmg=fwhmg)
        )

    def exponential(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        decay: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional exponential decay.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the exponential function.
                Defaults to 1.0.
            decay (float, optional): Decay of the exponential function. Defaults to 1.0.
            intercept (float, optional): Intercept of the exponential function.
                Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Exponential decay of `x` given.
        """
        return np.array(amplitude * np.exp(-x / decay) + intercept)

    def power(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        exponent: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional power function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the power function. Defaults to
                1.0.
            exponent (float, optional): Exponent of the power function. Defaults to 1.0.
            intercept (float, optional): Intercept of the power function. Defaults to
                0.0.

        Returns:
            NDArray[np.float64]: power function of `x` given.
        """
        return np.array(amplitude * np.power(x, exponent) + intercept)

    def linear(
        self,
        x: NDArray[np.float64],
        slope: float = 1.0,
        intercept: float = 0.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional linear function.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            slope (float, optional): Slope of the linear function. Defaults to 1.0.
            intercept (float, optional): Intercept of the linear function.
                Defaults to 0.0.

        Returns:
            NDArray[np.float64]: Linear function of `x` given.
        """
        return np.array(slope * x + intercept)

    def constant(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
    ) -> NDArray[np.float64]:
        """Return a 1-dimensional constant value.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the constant. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Constant value of `x` given.
        """
        # linspace with identical endpoints yields a flat array of len(x).
        return np.array(np.linspace(amplitude, amplitude, len(x)))

    @staticmethod
    def _norm(
        x: NDArray[np.float64], center: float, sigma: float
    ) -> NDArray[np.float64]:
        """Normalize the data for step functions.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            center (float): Center of the step function.
            sigma (float): Sigma of the step function.

        Returns:
            NDArray[np.float64]: Normalized data.
        """
        # Clamp near-zero sigma to avoid division by zero.
        if abs(sigma) < 1.0e-13:
            sigma = 1.0e-13
        return np.subtract(x, center) / sigma

    def erf(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional error function.

        $$
        f(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the error function.
                Defaults to 1.0.
            center (float, optional): Center of the error function. Defaults to 0.0.
            sigma (float, optional): Sigma of the error function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Error function of `x` given.
        """
        return np.array(amplitude * 0.5 * (1 + erf(self._norm(x, center, sigma))))

    def heaviside(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional Heaviside step function.

        $$
        f(x) = \begin{cases}
        0 & x < 0 \\
        0.5 & x = 0 \\
        1 & x > 0
        \end{cases}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Heaviside step function.
                Defaults to 1.0.
            center (float, optional): Center of the Heaviside step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the Heaviside step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Heaviside step function of `x` given.
        """
        return np.array(amplitude * 0.5 * (1 + np.sign(self._norm(x, center, sigma))))

    def atan(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional arctan step function.

        $$
        f(x) = \frac{1}{\pi} \arctan(\frac{x - c}{s})
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the arctan step function.
                Defaults to 1.0.
            center (float, optional): Center of the arctan step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the arctan step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Arctan step function of `x` given.
        """
        return np.array(
            amplitude * 0.5 * (1 + np.arctan(self._norm(x, center, sigma)) / pi)
        )

    def log(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        sigma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional logarithmic step function.

        $$
        f(x) = \frac{1}{1 + e^{-\frac{x - c}{s}}}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the logarithmic step function.
                Defaults to 1.0.
            center (float, optional): Center of the logarithmic step function.
                Defaults to 0.0.
            sigma (float, optional): Sigma of the logarithmic step function.
                Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Logarithmic step function of `x` given.
        """
        # NOTE(review): the docstring formula is a logistic sigmoid, but the
        # implementation applies np.log to the normalized abscissa, which yields
        # NaN for x < center — confirm the intended formula.
        return np.array(
            amplitude * 0.5 * (1 + np.log(self._norm(x, center, sigma)) / pi)
        )

    def cgaussian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmg: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Gaussian function.

        $$
        f(x) = \frac{1}{2} \left[1 + erf\left(\frac{x - c}{s \sqrt{2}}\right)\right]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Gaussian function. Defaults to
                1.0.
            center (float, optional): Center of the Gaussian function. Defaults to 0.0.
            fwhmg (float, optional): Full width at half maximum of the Gaussian
                function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Gaussian function of `x` given.
        """
        sigma = fwhmg * Constants.fwhmg2sig
        return np.array(
            amplitude * 0.5 * (1 + erf((x - center) / (sigma * np.sqrt(2.0))))
        )

    def clorentzian(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhml: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Lorentzian function.

        $$
        f(x) = \frac{1}{\pi} \arctan\left(\frac{x - c}{s}\right) + \frac{1}{2}
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Lorentzian function.
                Defaults to 1.0.
            center (float, optional): Center of the Lorentzian function.
                Defaults to 0.0.
            fwhml (float, optional): Full width at half maximum of the Lorentzian
                function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Lorentzian function of `x` given.
        """
        sigma = fwhml * Constants.fwhml2sig
        return np.array(amplitude * (np.arctan((x - center) / sigma) / pi) + 0.5)

    def cvoigt(
        self,
        x: NDArray[np.float64],
        amplitude: float = 1.0,
        center: float = 0.0,
        fwhmv: float = 1.0,
        gamma: float = 1.0,
    ) -> NDArray[np.float64]:
        r"""Return a 1-dimensional cumulative Voigt function.

        $$
        f(x) = \frac{1}{2} \left[1 + erf\left(\frac{x - c}{s \sqrt{2}}\right)\right]
        $$

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            amplitude (float, optional): Amplitude of the Voigt function. Defaults to
                1.0.
            center (float, optional): Center of the Voigt function. Defaults to 0.0.
            fwhmv (float, optional): Full width at half maximum of the Voigt function.
                Defaults to 1.0.
            gamma (float, optional): Gamma of the Voigt function. Defaults to 1.0.

        Returns:
            NDArray[np.float64]: Cumulative Voigt function of `x` given.
        """
        sigma = fwhmv * Constants.fwhmv2sig
        # Cumulative Gaussian envelope damped by a Gaussian factor in gamma.
        return np.array(
            amplitude
            * 0.5
            * (1 + erf((x - center) / (sigma * np.sqrt(2.0))))
            * np.exp(-(((x - center) / gamma) ** 2))
        )
@dataclass(frozen=True)
class ReferenceKeys:
    """Reference keys for model fitting and peak detection."""

    # All model names declared in the pydantic DistributionModelAPI schema.
    __models__ = list(DistributionModelAPI.schema()["properties"].keys())
    # Subset of models supported by automatic peak detection.
    __automodels__ = [
        "gaussian",
        "lorentzian",
        "voigt",
        "pseudovoigt",
    ]

    def model_check(self, model: str) -> None:
        """Check if model is available.

        The model name may carry a suffix (e.g. `gaussian_1`); only the part
        before the first underscore is validated.

        Args:
            model (str): Model name.

        Raises:
            NotImplementedError: If the model is not supported.
        """
        if model.split("_")[0] not in self.__models__:
            raise NotImplementedError(f"{model} is not supported!")

    def automodel_check(self, model: str) -> None:
        """Check if model is available.

        Args:
            model (str): Auto Model name (gaussian, lorentzian, voigt, or pseudovoigt).

        Raises:
            KeyError: If the model is not supported.
        """
        if model not in self.__automodels__:
            raise KeyError(f"{model} is not supported!")

    def detection_check(self, args: Dict[str, Any]) -> None:
        """Check if detection is available.

        Args:
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.

        Raises:
            KeyError: If the key is not parameter of the `scipy.signal.find_peaks`
                function. This will be checked via `pydantic` in `spectrafit.api`.
        """
        # Validation happens inside the pydantic model constructor.
        AutopeakAPI(**args)
@dataclass(frozen=True)
class Constants:
    r"""Mathematical constants for the curve models.

    !!! info "Constants"

        1. Natural logarithm of 2

        $$
        ln2 = \log{2}
        $$

        2. Square root of 2 times pi

        $$
        sq2pi = \sqrt{2 \pi}
        $$

        3. Square root of pi

        $$
        sqpi = \sqrt{ \pi}
        $$

        4. Square root of 2

        $$
        sq2 = \sqrt{2}
        $$

        5. Full width at half maximum to sigma for Gaussian

        $$
        fwhmg2sig = \frac{1}{ 2 \sqrt{2\log{2}}}
        $$

        6. Full width at half maximum to sigma for Lorentzian

        $$
        fwhml2sig = \frac{1}{2}
        $$

        7. Full width at half maximum to sigma for Voigt according to the article by
        Olivero and Longbothum[^1], check also
        [XPSLibary website](https://xpslibrary.com/voigt-peak-shape/).

        $$
        fwhm_{\text{Voigt}} \approx 0.5346 \cdot fwhm_{\text{Gaussian}} +
        \sqrt{ 0.2166 fwhm_{\text{Lorentzian}}^2 + fwhm_{\text{Gaussian}}^2 }
        $$

        In case of equal FWHM for Gaussian and Lorentzian, the Voigt FWHM can be
        defined as:

        $$
        fwhm_{\text{Voigt}} \approx 1.0692 + 2 \sqrt{0.2166 + 2 \ln{2}} \cdot \sigma
        $$

        $$
        fwhmv2sig = \frac{1}{fwhm_{\text{Voigt}}}
        $$

    [^1]:
        J.J. Olivero, R.L. Longbothum,
        _Empirical fits to the Voigt line width: A brief review_,
        **Journal of Quantitative Spectroscopy and Radiative Transfer**,
        Volume 17, Issue 2, 1977, Pages 233-236, ISSN 0022-4073,
        https://doi.org/10.1016/0022-4073(77)90161-3.
    """

    ln2 = log(2.0)
    sq2pi = sqrt(2.0 * pi)
    sqpi = sqrt(pi)
    sq2 = sqrt(2.0)
    fwhmg2sig = 1 / (2.0 * sqrt(2.0 * log(2.0)))
    fwhml2sig = 1 / 2.0
    fwhmv2sig = 1 / (2 * 0.5346 + 2 * sqrt(0.2166 + log(2) * 2))
class AutoPeakDetection:
    """Automatic detection of peaks in a spectrum."""

    def __init__(
        self,
        x: NDArray[np.float64],
        data: NDArray[np.float64],
        args: Dict[str, Any],
    ) -> None:
        """Initialize the AutoPeakDetection class.

        Args:
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 1d-array.
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        self.x = x
        self.data = data
        # Only the "autopeak" section of the arguments is relevant here; it is
        # either a bool (use defaults) or a dict of find_peaks overrides.
        self._args = args["autopeak"]

    @staticmethod
    def check_key_exists(
        key: str, args: Dict[str, Any], value: Union[float, Tuple[Any, Any]]
    ) -> Any:
        """Check if a key exists in a dictionary.

        Please check for the reference key also [scipy.signal.find_peaks][1].

        [1]:
            https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html

        Args:
            key (str): Reference key of `scipy.signal.find_peaks`.
            args (Dict[str, Any]): Reference values of `scipy.signal.find_peaks`, if not
                defined will be set to estimated default values.
            value (Union[float, Tuple[float,float]]): Default value for the reference
                key.

        Returns:
            Any: The reference value for `scipy.signal.find_peaks`.
        """
        return args.get(key, value)

    @property
    def estimate_height(self) -> Tuple[float, float]:
        r"""Estimate the initial height based on an inverse noise ratio of a signal.

        !!! info "About the estimation of the height"

            The lower end of the height is the inverse noise ratio of the `data`, and
            upper limit is the maximum value of the `data`. The noise ratio of the
            `data` is based on the original implementation by `SciPy`:

            ```python
            def signaltonoise(a, axis=0, ddof=0):
                a = np.asanyarray(a)
                m = a.mean(axis)
                sd = a.std(axis=axis, ddof=ddof)
                return np.where(sd == 0, 0, m / sd)
            ```

        Returns:
            Tuple[float, float]: Tuple of the inverse signal to noise ratio and
                the maximum value of the `data`.
        """
        # NOTE(review): the lower bound is computed as 1 - mean/std, while the
        # quoted SciPy snippet computes mean/std — confirm the intended form.
        return 1 - self.data.mean() / self.data.std(), self.data.max()

    @property
    def estimate_threshold(self) -> Tuple[float, float]:
        """Estimate the threshold value for the peak detection.

        Returns:
            Tuple[float, float]: Minimum and maximum value of the spectrum `data`,
                respectively, `intensity`.
        """
        return self.data.min(), self.data.max()

    @property
    def estimate_distance(self) -> float:
        """Estimate the initial distance between peaks.

        Returns:
            float: Estimated distance between peaks.
        """
        # find_peaks requires distance >= 1, so the smallest x-step is clamped.
        min_step = np.diff(self.x).min()
        return max(min_step, 1.0)

    @property
    def estimate_prominence(self) -> Tuple[float, float]:
        """Estimate the prominence of a peak.

        !!! info "About the estimation of the prominence"

            The prominence is the difference between the height of the peak and the
            bottom. To get a estimate of the prominence, the height of the peak is
            calculated by maximum value of the `data` and the bottom is calculated by
            the harmonic mean of the `data`.

        Returns:
            Tuple[float, float]: Tuple of the harmonic-mean and maximum value of `data`.
        """
        # hmean raises ValueError for non-positive data; fall back to the
        # arithmetic mean in that case.
        try:
            return hmean(self.data), self.data.max()
        except ValueError as exc:
            print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
            return self.data.mean(), self.data.max()

    @property
    def estimated_width(self) -> Tuple[float, float]:
        """Estimate the width of a peak.

        !!! info "About the estimation of the width"

            The width of a peak is estimated for a lower and an upper end. For the lower
            end, the minimum stepsize is used. For the upper end, the stepsize between
            the half maximum and the minimum value of the `data` is used as the width.

        Returns:
            Tuple[float, float]: Estimated width lower and upper end of the peaks.
        """
        return (
            np.diff(self.x).min(),
            np.abs(self.x[self.data.argmax()] - self.x[self.data.argmin()]) / 2,
        )

    @property
    def estimated_rel_height(self) -> float:
        """Estimate the relative height of a peak.

        !!! info "About the estimation of the relative height"

            The relative height of a peak is approximated by the difference of the
            harmonic mean value of the `data` and the minimum value of the `data`
            divided by the factor of `4`. In case of negative ratios, the value will be
            set to `Zero`.

        Returns:
            float: Estimated relative height of a peak.
        """
        try:
            rel_height = (hmean(self.data) - self.data.min()) / 4
        except ValueError as exc:
            print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
            rel_height = (self.data.mean() - self.data.min()) / 4
        return rel_height if rel_height > 0 else 0.0

    @property
    def estimated_wlen(self) -> float:
        r"""Estimate the window length for the peak detection.

        !!! info "About the estimation of the window length"

            The window length is the length of the window for the peak detection is
            defined to be 1% of the length of the `data`, consequently the len of the
            `data` is divided by 100. In case of a window length smaller than 1, the
            window length will be set to numerical value of 1, which is defined by
            `1 + 1e-9`.

        Returns:
            float: Estimated window length is set to the numeric value of > 1.
        """
        wlen = self.data.size / 100
        return wlen if wlen > 1.0 else 1 + 1e-9

    @property
    def estimated_plateau_size(self) -> Tuple[float, float]:
        """Estimate the plateau size for the peak detection.

        NOTE(review): this property appears unused — both `default_values` and
        `initialize_peak_detection` use a plain `0`/`0.0` for `plateau_size`
        instead of this estimate; confirm which is intended.

        Returns:
            Tuple[float, float]: Estimated plateau size is set to `zero` for the lower
                end and the maximum value of the `x` for the upper end.
        """
        return 0.0, self.x.max()

    def initialize_peak_detection(self) -> None:
        """Initialize the peak detection.

        !!! note "Initialize the peak detection"

            This method is used to initialize the peak detection. The initialization can
            be activated by setting the `initialize` attribute to `True`, which will
            automatically estimate the default parameters for the peak detection. In
            case of the `initialize` attribute is defined as dictionary, the proposed
            values are taken from the dictionary if the corresponding key is present;
            otherwise the estimated default values are used.

        Raises:
            TypeError: If the `initialize` attribute is not of type `bool` or `dict`.
        """
        if isinstance(self._args, bool):
            self.default_values()
        elif isinstance(self._args, dict):
            # Validate the user-supplied keys against the find_peaks signature
            # first, then fill in estimates for anything not provided.
            ReferenceKeys().detection_check(self._args)
            self.height = self.check_key_exists(
                key="height", args=self._args, value=self.estimate_height
            )
            self.threshold = self.check_key_exists(
                key="threshold", args=self._args, value=self.estimate_threshold
            )
            self.distance = self.check_key_exists(
                key="distance", args=self._args, value=self.estimate_distance
            )
            self.prominence = self.check_key_exists(
                key="prominence", args=self._args, value=self.estimate_prominence
            )
            self.width = self.check_key_exists(
                key="width", args=self._args, value=self.estimated_width
            )
            self.wlen = self.check_key_exists(
                key="wlen", args=self._args, value=self.estimated_wlen
            )
            self.rel_height = self.check_key_exists(
                key="rel_height", args=self._args, value=self.estimated_rel_height
            )
            self.plateau_size = self.check_key_exists(
                key="plateau_size", args=self._args, value=0.0
            )
        else:
            raise TypeError(
                f"The type of the `args` is not supported: {type(self._args)}"
            )

    def default_values(self) -> None:
        """Set the default values for the peak detection."""
        self.height = self.estimate_height
        self.threshold = self.estimate_threshold
        self.distance = self.estimate_distance
        self.prominence = self.estimate_prominence
        self.width = self.estimated_width
        self.wlen = self.estimated_wlen
        self.rel_height = self.estimated_rel_height
        self.plateau_size = 0

    def __autodetect__(self) -> Any:
        """Return peak positions and properties."""
        return find_peaks(
            self.data,
            height=self.height,
            threshold=self.threshold,
            distance=self.distance,
            prominence=self.prominence,
            width=self.width,
            wlen=self.wlen,
            rel_height=self.rel_height,
            plateau_size=self.plateau_size,
        )
class ModelParameters(AutoPeakDetection):
"""Class to define the model parameters."""
    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the model parameters.

        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]):
                Nested arguments dictionary for the model based on **one** or **two**
                `int` keys depending if global fitting parameters, will explicit
                defined or not.

        !!! note "About `args` for models"

            The `args` dictionary is used to define the model parameters. And the total
            nested dictionary structure is as follows:

            ```python
            args: Dict[str, Dict[int, Dict[str, Dict[str, Union[str, int, float]]]]]
            ```

        !!! info "About the fitting options"

            In general, there are two option for the fitting possible:

            1. `Classic fitting` or `local fitting`, where the parameters are defined
                for a 2D spectrum.
            2. `Global fitting`, where the parameters are defined for a 3D spectrum.
                Here, the parameters can be automatically defined for each column on the
                basis of the initial parameters or they can be completely defined by the
                user. The `global fitting` definition starts at `1` similar to the
                peaks attributes notation.
        """
        # Number of intensity columns (everything except the energy column).
        self.col_len = df.shape[1] - 1
        self.args = args
        self.params = Parameters()
        self.x, self.data = self.df_to_numvalues(df=df, args=args)
        # Hand x/data over to the AutoPeakDetection base class.
        super().__init__(self.x, self.data, self.args)
def df_to_numvalues(
self, df: pd.DataFrame, args: Dict[str, Any]
) -> Tuple[NDArray[np.float64], NDArray[np.float64]]:
"""Transform the dataframe to numeric values of `x` and `data`.
!!! note "About the dataframe to numeric values"
The transformation is done by the `value` property of pandas. The dataframe
is separated into the `x` and `data` columns and the `x` column is
transformed to the energy values and the `data` column is transformed to
the intensity values depending on the `args` dictionary. In terms of global
fitting, the `data` contains the intensity values for each column.
Args:
df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
args (Dict[str, Any]): The input file arguments as a dictionary with
additional information beyond the command line arguments.
Returns:
Tuple[NDArray[np.float64], NDArray[np.float64]]: Tuple of `x` and
`data` as numpy arrays.
"""
if args["global_"]:
return (
df[args["column"][0]].to_numpy(),
df.loc[:, df.columns != args["column"][0]].to_numpy(),
)
return (df[args["column"][0]].to_numpy(), df[args["column"][1]].to_numpy())
    @property
    def return_params(self) -> Parameters:
        """Return the `class` representation of the model parameters.

        Returns:
            Parameters: Model parameters class.
        """
        # Build/refresh self.params before handing it out.
        self.__perform__()
        return self.params
    def __str__(self) -> str:
        """Return the `string` representation of the model parameters.

        Returns:
            str: String representation of the model parameters.
        """
        # Build/refresh self.params before stringifying.
        self.__perform__()
        return str(self.params)
    def __perform__(self) -> None:
        """Perform the model parameter definition.

        Dispatches on the global-fitting mode (`0`, `1`, or `2`) and on whether
        automatic peak detection (`autopeak`) is requested.

        Raises:
            KeyError: Global fitting is combination with automatic peak detection is
                not implemented yet.
        """
        if self.args["global_"] == 0 and not self.args["autopeak"]:
            self.define_parameters()
        elif self.args["global_"] == 1 and not self.args["autopeak"]:
            self.define_parameters_global()
        elif self.args["global_"] == 2 and not self.args["autopeak"]:
            self.define_parameters_global_pre()
        elif self.args["global_"] == 0:
            # autopeak is set: detect peaks first, then derive parameters.
            self.initialize_peak_detection()
            self.define_parameters_auto()
        elif self.args["global_"] in [1, 2]:
            raise KeyError(
                "Global fitting mode with automatic peak detection "
                "is not supported yet."
            )
def define_parameters_auto(self) -> None:
"""Auto define the model parameters for local fitting."""
positions, properties = self.__autodetect__()
if (
not isinstance(self.args["autopeak"], bool)
and "model_type" in self.args["autopeak"]
):
_model = self.args["autopeak"]["model_type"].lower()
ReferenceKeys().automodel_check(model=_model)
models = _model
else:
models = "gaussian"
if models == "gaussian":
for i, (_cent, _amp, _fhmw) in enumerate(
zip(
self.x[positions],
properties["peak_heights"],
properties["widths"],
),
start=1,
):
self.params.add(
f"{models}_amplitude_{i}",
value=_amp,
min=-np.abs(1.25 * _amp),
max=np.abs(1.25 * _amp),
vary=True,
)
self.params.add(
f"{models}_center_{i}",
value=_cent,
min=0.5 * _cent,
max=2 * _cent,
vary=True,
)
self.params.add(
f"{models}_fwhmg_{i}",
value=_fhmw,
min=0,
max=2 * _fhmw,
vary=True,
)
elif models == "lorentzian":
for i, (_cent, _amp, _fhmw) in enumerate(
zip(
self.x[positions],
properties["peak_heights"],
properties["widths"],
),
start=1,
):
self.params.add(
f"{models}_amplitude_{i}",
value=_amp,
min=-np.abs(1.25 * _amp),
max=np.abs(1.25 * _amp),
vary=True,
)
self.params.add(
f"{models}_center_{i}",
value=_cent,
min=0.5 * _cent,
max=2 * _cent,
vary=True,
)
self.params.add(
f"{models}_fwhml_{i}",
value=_fhmw,
min=0,
max=2 * _fhmw,
vary=True,
)
elif models == "voigt":
for i, (_cent, _amp, _fhmw) in enumerate(
zip(
self.x[positions],
properties["peak_heights"],
properties["widths"],
),
start=1,
):
self.params.add(
f"{models}_amplitude_{i}",
value=_amp,
min=-np.abs(1.25 * _amp),
max=np.abs(1.25 * _amp),
vary=True,
)
self.params.add(
f"{models}_center_{i}",
value=_cent,
min=0.5 * _cent,
max=2 * _cent,
vary=True,
)
self.params.add(
f"{models}_fwhmv_{i}",
value=_fhmw,
min=0,
max=2 * _fhmw,
vary=True,
)
elif models == "pseudovoigt":
for i, (_cent, _amp, _fhmw) in enumerate(
zip(
self.x[positions],
properties["peak_heights"],
properties["widths"],
),
start=1,
):
self.params.add(
f"{models}_amplitude_{i}",
value=_amp,
min=-np.abs(1.25 * _amp),
max=np.abs(1.25 * _amp),
vary=True,
)
self.params.add(
f"{models}_center_{i}",
value=_cent,
min=0.5 * _cent,
max=2 * _cent,
vary=True,
)
self.params.add(
f"{models}_fwhmg_{i}",
value=0.5 * _fhmw,
min=0,
max=_fhmw,
vary=True,
)
self.params.add(
f"{models}_fwhml_{i}",
value=0.5 * _fhmw,
min=0,
max=2 * _fhmw,
vary=True,
)
self.args["auto_generated_models"] = {
"models": {
key: {
"value": self.params[key].value,
"min": self.params[key].min,
"max": self.params[key].max,
"vary": self.params[key].vary,
}
for key in self.params
},
"positions": positions.tolist(),
"properties": {key: value.tolist() for key, value in properties.items()},
}
def define_parameters(self) -> None:
"""Define the input parameters for a `params`-dictionary for classic fitting."""
for key_1, value_1 in self.args["peaks"].items():
self.define_parameters_loop(key_1=key_1, value_1=value_1)
def define_parameters_loop(self, key_1: str, value_1: Dict[str, Any]) -> None:
"""Loop through the input parameters for a `params`-dictionary.
Args:
key_1 (str): The key of the first level of the input dictionary.
value_1 (Dict[str, Any]): The value of the first level of the input
dictionary.
"""
for key_2, value_2 in value_1.items():
self.define_parameters_loop_2(key_1=key_1, key_2=key_2, value_2=value_2)
def define_parameters_loop_2(
self, key_1: str, key_2: str, value_2: Dict[str, Any]
) -> None:
"""Loop through the input parameters for a `params`-dictionary.
Args:
key_1 (str): The key of the first level of the input dictionary.
key_2 (str): The key of the second level of the input dictionary.
value_2 (Dict[str, Any]): The value of the second level of the input
dictionary.
"""
for key_3, value_3 in value_2.items():
self.define_parameters_loop_3(
key_1=key_1, key_2=key_2, key_3=key_3, value_3=value_3
)
def define_parameters_loop_3(
self, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
) -> None:
"""Loop through the input parameters for a `params`-dictionary.
Args:
key_1 (str): The key of the first level of the input dictionary.
key_2 (str): The key of the second level of the input dictionary.
key_3 (str): The key of the third level of the input dictionary.
value_3 (Dict[str, Any]): The value of the third level of the input
dictionary.
"""
self.params.add(f"{key_2}_{key_3}_{key_1}", **value_3)
def define_parameters_global(self) -> None:
"""Define the input parameters for a `params`-dictionary for global fitting."""
for col_i in range(self.col_len):
for key_1, value_1 in self.args["peaks"].items():
for key_2, value_2 in value_1.items():
for key_3, value_3 in value_2.items():
self._define_parameter(
col_i=col_i,
key_1=key_1,
key_2=key_2,
key_3=key_3,
value_3=value_3,
)
def _define_parameter(
self, col_i: int, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
) -> None:
"""Define the input parameters for a `params`-dictionary for global fitting.
Args:
col_i (int): The column index.
key_1 (str): The key of the first level of the input dictionary.
key_2 (str): The key of the second level of the input dictionary.
key_3 (str): The key of the third level of the input dictionary.
value_3 (Dict[str, Any]): The value of the third level of the input
dictionary.
"""
if col_i:
if key_3 != "amplitude":
self.params.add(
f"{key_2}_{key_3}_{key_1}_{col_i+1}",
expr=f"{key_2}_{key_3}_{key_1}_1",
)
else:
self.params.add(
f"{key_2}_{key_3}_{key_1}_{col_i+1}",
**value_3,
)
else:
self.params.add(f"{key_2}_{key_3}_{key_1}_1", **value_3)
def define_parameters_global_pre(self) -> None:
"""Define the input parameters for a `params`-dictionary for global fitting.
!!! warning "About `params` for global fitting"
`define_parameters_global_pre` requires fully defined `params`-dictionary
in the json, toml, or yaml file input. This means:
1. Number of the spectra must be defined.
2. Number of the peaks must be defined.
3. Number of the parameters must be defined.
4. The parameters must be defined.
"""
for key_1, value_1 in self.args["peaks"].items():
for key_2, value_2 in value_1.items():
for key_3, value_3 in value_2.items():
for key_4, value_4 in value_3.items():
self.params.add(f"{key_3}_{key_4}_{key_2}_{key_1}", **value_4)
class SolverModels(ModelParameters):
    """Solving models for 2D and 3D data sets.
    !!! hint "Solver Modes"
        * `"2D"`: Solve 2D models via the classic `lmfit` function.
        * `"3D"`: Solve 3D models via global fit. For the `global-fitting`
          procedure, the `lmfit` function is used to solve the models with an
          extended set of parameters.
    """
    def __init__(self, df: pd.DataFrame, args: Dict[str, Any]) -> None:
        """Initialize the solver modes.
        Args:
            df (pd.DataFrame): DataFrame containing the input data (`x` and `data`).
            args (Dict[str, Any]): The input file arguments as a dictionary with
                additional information beyond the command line arguments.
        """
        super().__init__(df=df, args=args)
        # Validated solver settings (minimizer/optimizer kwargs) and the
        # global-fitting switch, extracted through their respective API models.
        self.args_solver = SolverModelsAPI(**args).dict()
        self.args_global = GlobalFittingAPI(**args).dict()
        self.params = self.return_params
    def __call__(self) -> Tuple[Minimizer, Any]:
        """Solve the fitting model.
        Returns:
            Tuple[Minimizer, Any]: Minimizer class and the fitting results.
        """
        # Pick the residual function based on the global-fitting switch; both
        # branches share the same parameter set and minimizer settings.
        if self.args_global["global_"]:
            minimizer = Minimizer(
                self.solve_global_fitting,
                params=self.params,
                fcn_args=(self.x, self.data),
                **self.args_solver["minimizer"],
            )
        else:
            minimizer = Minimizer(
                self.solve_local_fitting,
                params=self.params,
                fcn_args=(self.x, self.data),
                **self.args_solver["minimizer"],
            )
        return (minimizer, minimizer.minimize(**self.args_solver["optimizer"]))
    @staticmethod
    def solve_local_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Solving the fitting problem.
        !!! note "About implemented models"
            `solve_local_fitting` is a wrapper function for calling the
            implemented models. Based on the `params` dictionary, the function
            calls the corresponding models and merges them into the general
            model, which will be optimized by the `lmfit`-optimizer.
            Currently the following models are supported:
            - [Gaussian](https://en.wikipedia.org/wiki/Gaussian_function)
            - [Lorentzian](https://en.wikipedia.org/wiki/Cauchy_distribution)
              also known as Cauchy distribution
            - [Voigt](https://en.wikipedia.org/wiki/Voigt_profile)
            - [Pseudo Voigt][1]
            - Exponential
            - [power][2] (also known as Log-parabola or just power)
            - Linear
            - Constant
            - [Error Function](https://en.wikipedia.org/wiki/Error_function)
            - [Arcus Tangens][3]
            - Logarithmic
            [1]: https://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_approximation
            [2]: https://en.wikipedia.org/wiki/Power_law
            [3]: https://en.wikipedia.org/wiki/Inverse_trigonometric_functions
        Args:
            params (Dict[str, Parameters]): The best optimized parameters of the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 1d-array.
        Returns:
            NDArray[np.float64]: The best-fitted data based on the proposed model.
        """
        val = np.zeros(x.shape)
        # Regroup flat parameter names "<model>_<attribute>_<peak>" into one
        # kwargs dict per (model, peak) pair.
        peak_kwargs: Dict[Tuple[str, str], Parameters] = defaultdict(dict)
        for model in params:
            model = model.lower()
            ReferenceKeys().model_check(model=model)
            c_name = model.split("_")
            peak_kwargs[(c_name[0], c_name[2])][c_name[1]] = params[model]
        # Sum every peak's distribution into the composite model.
        for key, _kwarg in peak_kwargs.items():
            val += getattr(DistributionModels(), key[0])(x, **_kwarg)
        # Residual: model minus observed data.
        return val - data
    @staticmethod
    def solve_global_fitting(
        params: Dict[str, Parameters],
        x: NDArray[np.float64],
        data: NDArray[np.float64],
    ) -> NDArray[np.float64]:
        r"""Solving the fitting for global problem.
        !!! note "About implemented models"
            `solve_global_fitting` is the global solution of
            `solve_local_fitting`, a wrapper function for calling the
            implemented models. For the kind of supported models see
            `solve_local_fitting`.
        !!! note "About the global solution"
            The global solution is a solution for the problem, where the `x`-values is
            the energy, but the y-values are the intensities, which has to be fitted as
            one unit. For this reason, the residual is calculated as the difference
            between all the y-values and the global proposed solution. Later the
            residual has to be flattened to a 1-dimensional array and minimized by the
            `lmfit`-optimizer.
        Args:
            params (Dict[str, Parameters]): The best optimized parameters of the fit.
            x (NDArray[np.float64]): `x`-values of the data.
            data (NDArray[np.float64]): `y`-values of the data as 2D-array.
        Returns:
            NDArray[np.float64]: The best-fitted data based on the proposed model.
        """
        val = np.zeros(data.shape)
        # Regroup "<model>_<attribute>_<peak>_<column>" names into one kwargs
        # dict per (model, peak, column) triple.
        peak_kwargs: Dict[Tuple[str, str, str], Parameters] = defaultdict(dict)
        for model in params:
            model = model.lower()
            ReferenceKeys().model_check(model=model)
            c_name = model.split("_")
            peak_kwargs[(c_name[0], c_name[2], c_name[3])][c_name[1]] = params[model]
        for key, _kwarg in peak_kwargs.items():
            # Column suffixes are 1-based in parameter names; array is 0-based.
            i = int(key[2]) - 1
            val[:, i] += getattr(DistributionModels(), key[0])(x, **_kwarg)
        val -= data
        # lmfit expects a flat residual vector for the global problem.
        return val.flatten()
def calculated_model(
    params: Dict[str, Parameters],
    x: NDArray[np.float64],
    df: pd.DataFrame,
    global_fit: int,
) -> pd.DataFrame:
    r"""Calculate the single contributions of the models and add them to the dataframe.

    !!! note "About calculated models"

        `calculated_model` is a wrapper function similar to `solve_model`; its
        goal is to recover, from the best-fit parameters, the individual
        contribution of every model, since `lmfit` itself only exposes the
        combined best-fit.

    Args:
        params (Dict[str, Parameters]): The best optimized parameters of the fit.
        x (NDArray[np.float64]): `x`-values of the data.
        df (pd.DataFrame): DataFrame containing the input data (`x` and `data`),
            as well as the best fit and the corresponding residuum. Hence, it will
            be extended by the single contribution of the model.
        global_fit (int): If 1 or 2, the model is calculated for the global fit.

    Returns:
        pd.DataFrame: Extended dataframe containing the single contributions of
            the models.
    """
    # Regroup flat parameter names into one kwargs dict per model/peak
    # (and per column when fitting globally).
    grouped: Dict[Any, Parameters] = defaultdict(dict)
    for raw_name in params:
        raw_name = raw_name.lower()
        ReferenceKeys().model_check(model=raw_name)
        parts = raw_name.split("_")
        group_key = (
            (parts[0], parts[2], parts[3]) if global_fit else (parts[0], parts[2])
        )
        grouped[group_key][parts[1]] = params[raw_name]
    result = df.copy()
    distributions = DistributionModels()
    for group_key, model_kwargs in grouped.items():
        column_name = "_".join(group_key)
        result[column_name] = getattr(distributions, group_key[0])(x, **model_kwargs)
    return result
<|code_end|>
|
[Docs]: Using builtin release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "0.16.4"
<|code_end|>
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0a0"
<|code_end|>
|
[Docs]: Update release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
- Link to the complete changes of the latest release.
- Exclude auto commits in the contributor list
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0a0"
<|code_end|>
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0a1"
<|code_end|>
|
[Docs]: Using mike for versioning docs
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
https://squidfunk.github.io/mkdocs-material/setup/setting-up-versioning/
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0a2"
<|code_end|>
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0a3"
<|code_end|>
|
[Docs]: Update labeled criteria for CHANGELOG
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
Update the labeler criteria for avoiding double labels in `CHANGELOG.md`
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0a7"
<|code_end|>
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0a8"
<|code_end|>
|
[Bug]: ASCII Char in creating branch
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Is crashing
### Expected Behavior
Is realising a change in changeling
### Steps To Reproduce
_No response_
### ⚙️ Environment
```markdown
- OS:
- Python:
- spectrafit:
```
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0b1"
<|code_end|>
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "1.0.0b2"
<|code_end|>
|
[Feature]: Add python 3.11 support
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Feature
Add python 3.11 support
### Possible Solution
_No response_
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "0.16.6"
<|code_end|>
| spectrafit/__init__.py
<|code_start|>"""SpectraFit, fast command line tool for fitting data."""
__version__ = "0.16.7"
<|code_end|>
|
SSL: CERTIFICATE_VERIFY_FAILED
Hi,
I don't know if you got notified about my last comment in the closed issue #81 . I cannot reopen it, so ill try it this way:
Sorry, I hadn't had the time to test it.
I have tested it now with the newest version 3.0.0b5 and HASS 0.64.3
self.get_state() works now without any problems.
When setting/updating a state (with self.set_state() or self.call_service()) I still receive the following error:
```
Exception in thread Thread-16:
Traceback (most recent call last):
File "/home/pi/.local/lib/python3.5/site-packages/urllib3/connectionpool.py", line 601, in urlopen
chunked=chunked)
File "/home/pi/.local/lib/python3.5/site-packages/urllib3/connectionpool.py", line 346, in _make_request
self._validate_conn(conn)
File "/home/pi/.local/lib/python3.5/site-packages/urllib3/connectionpool.py", line 850, in _validate_conn
conn.connect()
File "/home/pi/.local/lib/python3.5/site-packages/urllib3/connection.py", line 326, in connect
ssl_context=context)
File "/home/pi/.local/lib/python3.5/site-packages/urllib3/util/ssl_.py", line 329, in ssl_wrap_socket
return context.wrap_socket(sock, server_hostname=server_hostname)
File "/usr/lib/python3.5/ssl.py", line 385, in wrap_socket
_context=self)
File "/usr/lib/python3.5/ssl.py", line 760, in __init__
self.do_handshake()
File "/usr/lib/python3.5/ssl.py", line 996, in do_handshake
self._sslobj.do_handshake()
File "/usr/lib/python3.5/ssl.py", line 641, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:720)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/pi/.local/lib/python3.5/site-packages/requests/adapters.py", line 440, in send
timeout=timeout
File "/home/pi/.local/lib/python3.5/site-packages/urllib3/connectionpool.py", line 639, in urlopen
_stacktrace=sys.exc_info()[2])
File "/home/pi/.local/lib/python3.5/site-packages/urllib3/util/retry.py", line 388, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='hass.home.local', port=8123): Max retries exceeded with url: /api/services/input_number/set_value (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:720)'),))
```
My AppDaemon config is:
```
appdaemon:
threads: 10
app_dir: '/srv/apps'
plugins:
HASS:
type: hass
ha_url: 'https://hass.home.local:8123'
ha_key: 'ABC'
cert_path: '/etc/ssl/certs/ca-certificates.crt'
cert_verify: False
```
Setting a state with curl works without any problem.
`curl -X POST -H "x-ha-access: ABC" -H "Content-Type: application/json" -d '{"attributes": {"friendly_name": "Volume", "icon": "mdi:volume-medium", "max": -20.0, "min": -100.0, "mode": "slider", "step": 0.5}, "entity_id": "input_number.volume", "state": "-30.0"}' "https://hass.home.local:8123/api/states/input_number.volume"`
| appdaemon/plugins/hass/hassapi.py
<|code_start|>import requests
import inspect
import appdaemon.appapi as appapi
import appdaemon.utils as utils
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#
# Define an entities class as a descriptor to enable read only access of HASS state
#
def hass_check(func):
def func_wrapper(*args, **kwargs):
self = args[0]
if not self.AD.get_plugin(self._get_namespace(**kwargs)).reading_messages:
self.AD.log("WARNING", "Attempt to call Home Assistant while disconnected: {}".format(func))
return lambda *args: None
else:
return func(*args, **kwargs)
return (func_wrapper)
class Hass(appapi.AppDaemon):
#
# Internal
#
def __init__(self, ad, name, logger, error, args, config, app_config, global_vars):
super(Hass, self).__init__(ad, name, logger, error, args, config, app_config, global_vars)
self.namespace = "default"
self.AD = ad
self.name = name
self._logger = logger
self._error = error
self.args = args
self.global_vars = global_vars
self.config = config
self.app_config = app_config
#
# Register specific constraints
#
self.register_constraint("constrain_presence")
self.register_constraint("constrain_input_boolean")
self.register_constraint("constrain_input_select")
self.register_constraint("constrain_days")
def _sub_stack(self, msg):
# If msg is a data structure of some type, don't sub
if type(msg) is str:
stack = inspect.stack()
if msg.find("__module__") != -1:
msg = msg.replace("__module__", stack[2][1])
if msg.find("__line__") != -1:
msg = msg.replace("__line__", str(stack[2][2]))
if msg.find("__function__") != -1:
msg = msg.replace("__function__", stack[2][3])
return msg
def set_namespace(self, namespace):
self.namespace = namespace
def _get_namespace(self, **kwargs):
if "namespace" in kwargs:
namespace = kwargs["namespace"]
del kwargs["namespace"]
else:
namespace = self.namespace
return namespace
#
# Listen state stub here as super class doesn't know the namespace
#
def listen_state(self, cb, entity=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return super(Hass, self).listen_state(namespace, cb, entity, **kwargs)
#
# Likewise with get state
#
def get_state(self, entity=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return super(Hass, self).get_state(namespace, entity, **kwargs)
def set_state(self, entity_id, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
self._check_entity(namespace, entity_id)
self.AD.log(
"DEBUG",
"set_state: {}, {}".format(entity_id, kwargs)
)
if entity_id in self.get_state():
new_state = self.get_state()[entity_id]
else:
# Its a new state entry
new_state = {}
new_state["attributes"] = {}
if "state" in kwargs:
new_state["state"] = kwargs["state"]
if "attributes" in kwargs:
new_state["attributes"].update(kwargs["attributes"])
config = self.AD.get_plugin(self._get_namespace(**kwargs)).config
if "certpath" in config:
certpath = config["certpath"]
else:
certpath = None
if "ha_key" in config and config["ha_key"] != "":
headers = {'x-ha-access': config["ha_key"]}
else:
headers = {}
apiurl = "{}/api/states/{}".format(config["ha_url"], entity_id)
r = requests.post(
apiurl, headers=headers, json=new_state, verify=certpath
)
r.raise_for_status()
state = r.json()
# Update AppDaemon's copy
self.AD.set_state(namespace, entity_id, state)
return state
def set_app_state(self, entity_id, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
self._check_entity(namespace, entity_id)
self.AD.log(
"DEBUG",
"set_app_state: {}, {}".format(entity_id, kwargs)
)
if entity_id in self.get_state():
new_state = self.get_state()[entity_id]
else:
# Its a new state entry
new_state = {}
new_state["attributes"] = {}
if "state" in kwargs:
new_state["state"] = kwargs["state"]
if "attributes" in kwargs:
new_state["attributes"].update(kwargs["attributes"])
# Update AppDaemon's copy
self.AD.set_app_state(namespace, entity_id, new_state)
return new_state
def entity_exists(self, entity_id, **kwargs):
if "namespace" in kwargs:
del kwargs["namespace"]
namespace = self._get_namespace(**kwargs)
return self.AD.entity_exists(namespace, entity_id)
#
# Events
#
def listen_event(self, cb, event=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return super(Hass, self).listen_event(namespace, cb, event, **kwargs)
#
# Utility
#
def split_entity(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
return entity_id.split(".")
def split_device_list(self, list_):
return list_.split(",")
def log(self, msg, level="INFO"):
msg = self._sub_stack(msg)
self.AD.log(level, msg, self.name)
def error(self, msg, level="WARNING"):
msg = self._sub_stack(msg)
self.AD.err(level, msg, self.name)
def get_hass_config(self, **kwargs):
namespace = self._get_namespace(**kwargs)
return self.AD.get_plugin_meta(namespace)
#
#
#
def friendly_name(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
state = self.get_state(**kwargs)
if entity_id in state:
if "friendly_name" in state[entity_id]["attributes"]:
return state[entity_id]["attributes"]["friendly_name"]
else:
return entity_id
return None
#
# Device Trackers
#
def get_trackers(self, **kwargs):
return (key for key, value in self.get_state("device_tracker", **kwargs).items())
def get_tracker_details(self, **kwargs):
return self.get_state("device_tracker", **kwargs)
def get_tracker_state(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
return self.get_state(entity_id, **kwargs)
def anyone_home(self, **kwargs):
state = self.get_state(**kwargs)
for entity_id in state.keys():
thisdevice, thisentity = entity_id.split(".")
if thisdevice == "device_tracker":
if state[entity_id]["state"] == "home":
return True
return False
def everyone_home(self, **kwargs):
state = self.get_state(**kwargs)
for entity_id in state.keys():
thisdevice, thisentity = entity_id.split(".")
if thisdevice == "device_tracker":
if state[entity_id]["state"] != "home":
return False
return True
def noone_home(self, **kwargs):
state = self.get_state(**kwargs)
for entity_id in state.keys():
thisdevice, thisentity = entity_id.split(".")
if thisdevice == "device_tracker":
if state[entity_id]["state"] == "home":
return False
return True
#
# Built in constraints
#
def constrain_presence(self, value):
unconstrained = True
if value == "everyone" and not self.everyone_home():
unconstrained = False
elif value == "anyone" and not self.anyone_home():
unconstrained = False
elif value == "noone" and not self.noone_home():
unconstrained = False
return unconstrained
def constrain_input_boolean(self, value):
unconstrained = True
state = self.get_state()
values = value.split(",")
if len(values) == 2:
entity = values[0]
desired_state = values[1]
else:
entity = value
desired_state = "on"
if entity in state and state[entity]["state"] != desired_state:
unconstrained = False
return unconstrained
def constrain_input_select(self, value):
unconstrained = True
state = self.get_state()
values = value.split(",")
entity = values.pop(0)
if entity in state and state[entity]["state"] not in values:
unconstrained = False
return unconstrained
def constrain_days(self, value):
day = self.get_now().weekday()
daylist = [utils.day_of_week(day) for day in value.split(",")]
if day in daylist:
return True
return False
#
# Helper functions for services
#
@hass_check
def turn_on(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
self.call_service("homeassistant/turn_on", **rargs)
@hass_check
def turn_off(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
device, entity = self.split_entity(entity_id)
if device == "scene":
self.call_service("homeassistant/turn_on", **rargs)
else:
self.call_service("homeassistant/turn_off", **rargs)
@hass_check
def toggle(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
self.call_service("homeassistant/toggle", **rargs)
@hass_check
def set_value(self, entity_id, value, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id, "value": value}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["value"] = value
self.call_service("input_number/set_value", **rargs)
@hass_check
def set_textvalue(self, entity_id, value, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id, "value": value}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["value"] = value
self.call_service("input_text/set_value", **rargs)
@hass_check
def select_option(self, entity_id, option, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id, "option": option}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["option"] = option
self.call_service("input_select/select_option", **rargs)
@hass_check
def notify(self, message, **kwargs):
kwargs["message"] = message
if "name" in kwargs:
service = "notify/{}".format(kwargs["name"])
del kwargs["name"]
else:
service = "notify/notify"
self.call_service(service, **kwargs)
@hass_check
def persistent_notification(self, message, title=None, id=None):
kwargs = {}
kwargs["message"] = message
if title is not None:
kwargs["title"] = title
if id is not None:
kwargs["notification_id"] = id
self.call_service("persistent_notification/create", **kwargs)
#
# Event
#
@hass_check
def fire_event(self, event, **kwargs):
self.AD.log("DEBUG",
"fire_event: {}, {}".format(event, kwargs))
config = self.AD.get_plugin(self._get_namespace(**kwargs)).config
if "certpath" in config:
certpath = config["certpath"]
else:
certpath = None
if "ha_key" in config and config["ha_key"] != "":
headers = {'x-ha-access': config["ha_key"]}
else:
headers = {}
apiurl = "{}/api/events/{}".format(config["ha_url"], event)
r = requests.post(
apiurl, headers=headers, json=kwargs, verify=certpath
)
r.raise_for_status()
return r.json()
#
# Service
#
@staticmethod
def _check_service(service):
if service.find("/") == -1:
raise ValueError("Invalid Service Name: {}".format(service))
@hass_check
def call_service(self, service, **kwargs):
self._check_service(service)
d, s = service.split("/")
self.AD.log(
"DEBUG",
"call_service: {}/{}, {}".format(d, s, kwargs)
)
config = self.AD.get_plugin(self._get_namespace(**kwargs)).config
if "certpath" in config:
certpath = config["certpath"]
else:
certpath = None
if "ha_key" in config and config["ha_key"] != "":
headers = {'x-ha-access': config["ha_key"]}
else:
headers = {}
apiurl = "{}/api/services/{}/{}".format(config["ha_url"], d, s)
r = requests.post(
apiurl, headers=headers, json=kwargs, verify=certpath
)
r.raise_for_status()
return r.json()
<|code_end|>
| appdaemon/plugins/hass/hassapi.py
<|code_start|>import requests
import inspect
import appdaemon.appapi as appapi
import appdaemon.utils as utils
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#
# Define an entities class as a descriptor to enable read only access of HASS state
#
def hass_check(func):
def func_wrapper(*args, **kwargs):
self = args[0]
if not self.AD.get_plugin(self._get_namespace(**kwargs)).reading_messages:
self.AD.log("WARNING", "Attempt to call Home Assistant while disconnected: {}".format(func))
return lambda *args: None
else:
return func(*args, **kwargs)
return (func_wrapper)
class Hass(appapi.AppDaemon):
#
# Internal
#
def __init__(self, ad, name, logger, error, args, config, app_config, global_vars):
super(Hass, self).__init__(ad, name, logger, error, args, config, app_config, global_vars)
self.namespace = "default"
self.AD = ad
self.name = name
self._logger = logger
self._error = error
self.args = args
self.global_vars = global_vars
self.config = config
self.app_config = app_config
#
# Register specific constraints
#
self.register_constraint("constrain_presence")
self.register_constraint("constrain_input_boolean")
self.register_constraint("constrain_input_select")
self.register_constraint("constrain_days")
def _sub_stack(self, msg):
# If msg is a data structure of some type, don't sub
if type(msg) is str:
stack = inspect.stack()
if msg.find("__module__") != -1:
msg = msg.replace("__module__", stack[2][1])
if msg.find("__line__") != -1:
msg = msg.replace("__line__", str(stack[2][2]))
if msg.find("__function__") != -1:
msg = msg.replace("__function__", stack[2][3])
return msg
def set_namespace(self, namespace):
self.namespace = namespace
def _get_namespace(self, **kwargs):
if "namespace" in kwargs:
namespace = kwargs["namespace"]
del kwargs["namespace"]
else:
namespace = self.namespace
return namespace
#
# Listen state stub here as super class doesn't know the namespace
#
def listen_state(self, cb, entity=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return super(Hass, self).listen_state(namespace, cb, entity, **kwargs)
#
# Likewise with get state
#
def get_state(self, entity=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return super(Hass, self).get_state(namespace, entity, **kwargs)
def set_state(self, entity_id, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
self._check_entity(namespace, entity_id)
self.AD.log(
"DEBUG",
"set_state: {}, {}".format(entity_id, kwargs)
)
if entity_id in self.get_state():
new_state = self.get_state()[entity_id]
else:
# Its a new state entry
new_state = {}
new_state["attributes"] = {}
if "state" in kwargs:
new_state["state"] = kwargs["state"]
if "attributes" in kwargs:
new_state["attributes"].update(kwargs["attributes"])
config = self.AD.get_plugin(self._get_namespace(**kwargs)).config
if "cert_path" in config:
cert_path = config["cert_path"]
else:
cert_path = None
if "ha_key" in config and config["ha_key"] != "":
headers = {'x-ha-access': config["ha_key"]}
else:
headers = {}
apiurl = "{}/api/states/{}".format(config["ha_url"], entity_id)
r = requests.post(
apiurl, headers=headers, json=new_state, verify=cert_path
)
r.raise_for_status()
state = r.json()
# Update AppDaemon's copy
self.AD.set_state(namespace, entity_id, state)
return state
def set_app_state(self, entity_id, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
self._check_entity(namespace, entity_id)
self.AD.log(
"DEBUG",
"set_app_state: {}, {}".format(entity_id, kwargs)
)
if entity_id in self.get_state():
new_state = self.get_state()[entity_id]
else:
# Its a new state entry
new_state = {}
new_state["attributes"] = {}
if "state" in kwargs:
new_state["state"] = kwargs["state"]
if "attributes" in kwargs:
new_state["attributes"].update(kwargs["attributes"])
# Update AppDaemon's copy
self.AD.set_app_state(namespace, entity_id, new_state)
return new_state
def entity_exists(self, entity_id, **kwargs):
    """Return True if entity_id exists in the target namespace.

    BUGFIX: the "namespace" kwarg was deleted from kwargs *before*
    _get_namespace() ran, so an explicit namespace= argument was always
    ignored and the app's default namespace used instead.
    """
    namespace = self._get_namespace(**kwargs)
    if "namespace" in kwargs:
        del kwargs["namespace"]
    return self.AD.entity_exists(namespace, entity_id)
#
# Events
#
def listen_event(self, cb, event=None, **kwargs):
    """Register an event callback through the base class."""
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    return super(Hass, self).listen_event(namespace, cb, event, **kwargs)
#
# Utility
#
def split_entity(self, entity_id, **kwargs):
    """Split an entity ID into its [device, entity] components."""
    namespace = self._get_namespace(**kwargs)
    self._check_entity(namespace, entity_id)
    return entity_id.split(".")

def split_device_list(self, list_):
    """Turn a comma-separated device string into a list of device names."""
    return list_.split(",")
def log(self, msg, level="INFO"):
    """Write msg to the main AppDaemon log after placeholder substitution."""
    self.AD.log(level, self._sub_stack(msg), self.name)

def error(self, msg, level="WARNING"):
    """Write msg to the error log after placeholder substitution."""
    self.AD.err(level, self._sub_stack(msg), self.name)

def get_hass_config(self, **kwargs):
    """Return the Home Assistant plugin metadata for the target namespace."""
    return self.AD.get_plugin_meta(self._get_namespace(**kwargs))
#
#
#
def friendly_name(self, entity_id, **kwargs):
    """Return an entity's friendly_name attribute.

    Falls back to the entity ID when the attribute is missing and to
    None when the entity is unknown.
    """
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    state = self.get_state(**kwargs)
    try:
        info = state[entity_id]
    except KeyError:
        return None
    return info["attributes"].get("friendly_name", entity_id)
#
# Device Trackers
#
def get_trackers(self, **kwargs):
    """Yield the entity IDs of all device trackers."""
    return (tracker for tracker in self.get_state("device_tracker", **kwargs))

def get_tracker_details(self, **kwargs):
    """Return the full state of all device trackers."""
    return self.get_state("device_tracker", **kwargs)

def get_tracker_state(self, entity_id, **kwargs):
    """Return the state of a single device tracker."""
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    return self.get_state(entity_id, **kwargs)

def anyone_home(self, **kwargs):
    """Return True if at least one device tracker reports 'home'."""
    state = self.get_state(**kwargs)
    for eid in state:
        device, _ = eid.split(".")
        if device == "device_tracker" and state[eid]["state"] == "home":
            return True
    return False

def everyone_home(self, **kwargs):
    """Return True if every device tracker reports 'home'."""
    state = self.get_state(**kwargs)
    for eid in state:
        device, _ = eid.split(".")
        if device == "device_tracker" and state[eid]["state"] != "home":
            return False
    return True

def noone_home(self, **kwargs):
    """Return True if no device tracker reports 'home'."""
    state = self.get_state(**kwargs)
    for eid in state:
        device, _ = eid.split(".")
        if device == "device_tracker" and state[eid]["state"] == "home":
            return False
    return True
#
# Built in constraints
#
def constrain_presence(self, value):
    """Evaluate a presence constraint ('everyone'/'anyone'/'noone')."""
    if value == "everyone":
        return self.everyone_home()
    if value == "anyone":
        return self.anyone_home()
    if value == "noone":
        return self.noone_home()
    return True

def constrain_input_boolean(self, value):
    """Evaluate an input_boolean constraint ('entity' or 'entity,state')."""
    state = self.get_state()
    parts = value.split(",")
    if len(parts) == 2:
        entity, desired_state = parts
    else:
        entity, desired_state = value, "on"
    if entity in state and state[entity]["state"] != desired_state:
        return False
    return True

def constrain_input_select(self, value):
    """Evaluate an input_select constraint ('entity,option[,option...]')."""
    state = self.get_state()
    options = value.split(",")
    entity = options.pop(0)
    if entity in state and state[entity]["state"] not in options:
        return False
    return True

def constrain_days(self, value):
    """Evaluate a day-of-week constraint, e.g. 'mon,tue'."""
    today = self.get_now().weekday()
    allowed = [utils.day_of_week(item) for item in value.split(",")]
    return today in allowed
#
# Helper functions for services
#
@hass_check
def turn_on(self, entity_id, **kwargs):
    """Turn an entity on via homeassistant/turn_on."""
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    self.call_service("homeassistant/turn_on", **dict(kwargs, entity_id=entity_id))

@hass_check
def turn_off(self, entity_id, **kwargs):
    """Turn an entity off (scenes are activated via turn_on)."""
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    rargs = dict(kwargs, entity_id=entity_id)
    device, entity = self.split_entity(entity_id)
    if device == "scene":
        self.call_service("homeassistant/turn_on", **rargs)
    else:
        self.call_service("homeassistant/turn_off", **rargs)

@hass_check
def toggle(self, entity_id, **kwargs):
    """Toggle an entity via homeassistant/toggle."""
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    self.call_service("homeassistant/toggle", **dict(kwargs, entity_id=entity_id))

@hass_check
def set_value(self, entity_id, value, **kwargs):
    """Set the value of an input_number entity."""
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    self.call_service("input_number/set_value", **dict(kwargs, entity_id=entity_id, value=value))

@hass_check
def set_textvalue(self, entity_id, value, **kwargs):
    """Set the value of an input_text entity."""
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    self.call_service("input_text/set_value", **dict(kwargs, entity_id=entity_id, value=value))

@hass_check
def select_option(self, entity_id, option, **kwargs):
    """Select an option of an input_select entity."""
    self._check_entity(self._get_namespace(**kwargs), entity_id)
    self.call_service("input_select/select_option", **dict(kwargs, entity_id=entity_id, option=option))
@hass_check
def notify(self, message, **kwargs):
    """Send a notification via the notify service (optionally named)."""
    kwargs["message"] = message
    if "name" in kwargs:
        service = "notify/{}".format(kwargs.pop("name"))
    else:
        service = "notify/notify"
    self.call_service(service, **kwargs)

@hass_check
def persistent_notification(self, message, title=None, id=None):
    """Create a persistent notification in the HA front end."""
    args = {"message": message}
    if title is not None:
        args["title"] = title
    if id is not None:
        args["notification_id"] = id
    self.call_service("persistent_notification/create", **args)
#
# Event
#
@hass_check
def fire_event(self, event, **kwargs):
    """Fire an event on the Home Assistant event bus.

    kwargs (minus the AppDaemon-only ``namespace`` key) become the
    event data; returns HA's JSON response.
    """
    self.AD.log("DEBUG",
                "fire_event: {}, {}".format(event, kwargs))
    namespace = self._get_namespace(**kwargs)
    # BUGFIX: strip the AppDaemon-internal "namespace" kwarg so it is not
    # posted to Home Assistant as part of the event data (every other
    # method in this class removes it before use).
    if "namespace" in kwargs:
        del kwargs["namespace"]
    config = self.AD.get_plugin(namespace).config
    if "cert_path" in config:
        cert_path = config["cert_path"]
    else:
        cert_path = None
    if "ha_key" in config and config["ha_key"] != "":
        headers = {'x-ha-access': config["ha_key"]}
    else:
        headers = {}
    apiurl = "{}/api/events/{}".format(config["ha_url"], event)
    r = requests.post(
        apiurl, headers=headers, json=kwargs, verify=cert_path
    )
    r.raise_for_status()
    return r.json()
#
# Service
#
@staticmethod
def _check_service(service):
if service.find("/") == -1:
raise ValueError("Invalid Service Name: {}".format(service))
@hass_check
def call_service(self, service, **kwargs):
    """Call a Home Assistant service ('domain/service') over REST.

    kwargs (minus the AppDaemon-only ``namespace`` key) are sent as the
    service data; returns HA's JSON response.
    """
    self._check_service(service)
    d, s = service.split("/")
    self.AD.log(
        "DEBUG",
        "call_service: {}/{}, {}".format(d, s, kwargs)
    )
    namespace = self._get_namespace(**kwargs)
    # BUGFIX: remove the AppDaemon-internal "namespace" kwarg so it is not
    # forwarded to Home Assistant as service data.
    if "namespace" in kwargs:
        del kwargs["namespace"]
    config = self.AD.get_plugin(namespace).config
    if "cert_path" in config:
        cert_path = config["cert_path"]
    else:
        cert_path = None
    if "ha_key" in config and config["ha_key"] != "":
        headers = {'x-ha-access': config["ha_key"]}
    else:
        headers = {}
    apiurl = "{}/api/services/{}/{}".format(config["ha_url"], d, s)
    r = requests.post(
        apiurl, headers=headers, json=kwargs, verify=cert_path
    )
    r.raise_for_status()
    return r.json()
<|code_end|>
|
Avoid copying states in get_state() if read-only access is needed only
Hi,
I think that deep-copying the whole HA state adds considerable overhead to ``get_state()``, especially when called for a whole domain or even complete HA state.
It turns out that in most cases, you just need read-only access to the states. I see the reason why they're deep-copied at the moment, and this should probably stay the default behaviour, but I think there should be a way to retrieve states without this overhead when you really know you won't modify it. I, for instance, do sometimes query the whole state to search for entities with specific characteristics and know that I won't ever write to it.
I'd suggest a new keyword argument for ``get_state()``, maybe named ``copy`` with a default of ``True``. I can make a PR for this, if desired. I could even greatly simplify the ``get_state()`` code simultaneously.
What do you think? I'm happy to open the PR directly so you can review the concrete change — it's straightforward to implement.
Best regards
Robert
| appdaemon/adapi.py
<|code_start|>import datetime
import inspect
import iso8601
import re
from datetime import timedelta
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class ADAPI:
    """Base API object exposed to AppDaemon apps (plugin-agnostic part)."""
    #
    # Internal
    #
    def __init__(self, ad: AppDaemon, name, logging_obj, args, config, app_config, global_vars):
        # Store args
        self.AD = ad                      # central AppDaemon object
        self.name = name                  # this app's name
        self._logging = logging_obj
        self.config = config
        self.app_config = app_config
        self.args = args
        self.global_vars = global_vars
        self._namespace = "default"       # overridable via set_namespace()
        # Per-app child loggers for the main and error logs.
        self.logger = self._logging.get_child(name)
        self.err = self._logging.get_error().getChild(name)
        self.user_logs = {}               # cache of user-defined log children
        if "log_level" in args:
            self.logger.setLevel(args["log_level"])
            self.err.setLevel(args["log_level"])
        if "log" in args:
            # Redirect the app's main log to a user-defined log if requested.
            userlog = self.get_user_log(args["log"])
            if userlog is not None:
                self.logger = userlog

    @staticmethod
    def _sub_stack(msg):
        """Substitute __module__/__line__/__function__ placeholders in msg."""
        # If msg is a data structure of some type, don't sub
        if type(msg) is str:
            stack = inspect.stack()
            # stack[2] is the app frame that invoked log()/error().
            if msg.find("__module__") != -1:
                msg = msg.replace("__module__", stack[2][1])
            if msg.find("__line__") != -1:
                msg = msg.replace("__line__", str(stack[2][2]))
            if msg.find("__function__") != -1:
                msg = msg.replace("__function__", stack[2][3])
        return msg

    def _get_namespace(self, **kwargs):
        """Return kwargs['namespace'] if present, else the app default.

        NOTE: the del only affects this method's own **kwargs dict, which
        is why every caller strips "namespace" from its kwargs itself.
        """
        if "namespace" in kwargs:
            namespace = kwargs["namespace"]
            del kwargs["namespace"]
        else:
            namespace = self._namespace
        return namespace
#
# Logging
#
def _log(self, logger, msg, *args, **kwargs):
msg = self._sub_stack(msg)
if "level" in kwargs:
level = kwargs.get("level", "INFO")
kwargs.pop("level")
else:
level = "INFO"
ascii_encode = kwargs.get("ascii_encode", True)
if ascii_encode is True:
safe_enc = lambda s: str(s).encode("utf-8", "replace").decode("ascii", "replace")
msg = safe_enc(msg)
logger.log(self._logging.log_levels[level], msg, *args, **kwargs)
def log(self, msg, *args, **kwargs):
    """Log msg to the app's main log (or a user log via log=)."""
    target = self.logger
    if "log" in kwargs:
        # Its a user defined log
        target = self.get_user_log(kwargs.pop("log"))
    self._log(target, msg, *args, **kwargs)

def error(self, msg, *args, **kwargs):
    """Log msg to the app's error log."""
    self._log(self.err, msg, *args, **kwargs)
def listen_log(self, cb, level="INFO", **kwargs):
    """Register a callback fired for every log line at or above level."""
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    return utils.run_coroutine_threadsafe(self, self.AD.logging.add_log_callback(namespace, self.name, cb, level, **kwargs))

def cancel_listen_log(self, handle):
    """Cancel a callback registered with listen_log()."""
    self.logger.debug("Canceling listen_log for %s", self.name)
    utils.run_coroutine_threadsafe(self, self.AD.logging.cancel_log_callback(self.name, handle))

def get_main_log(self):
    """Return the app's main logger object."""
    return self.logger

def get_error_log(self):
    """Return the app's error logger object."""
    return self.err

def get_user_log(self, log):
    """Return (and cache) a child logger for the named user-defined log."""
    logger = self.user_logs.get(log)
    if logger is None:
        # Build it on the fly
        parent = self.AD.logging.get_user_log(self, log)
        if parent is not None:
            logger = parent.getChild(self.name)
            self.user_logs[log] = logger
            if "log_level" in self.args:
                logger.setLevel(self.args["log_level"])
    return logger

def set_log_level(self, level):
    """Set the level of the main, error, and all user logs."""
    numeric = self._logging.log_levels[level]
    self.logger.setLevel(numeric)
    self.err.setLevel(numeric)
    for log in self.user_logs:
        self.user_logs[log].setLevel(numeric)

def set_error_level(self, level):
    """Set the level of the error log only."""
    self.err.setLevel(self._logging.log_levels[level])
#
# Threading
#
def set_app_pin(self, pin):
    """Pin (or unpin) this app to a specific worker thread."""
    coro = self.AD.threading.set_app_pin(self.name, pin)
    utils.run_coroutine_threadsafe(self, coro)

def get_app_pin(self):
    """Return whether this app is pinned to a thread."""
    coro = self.AD.threading.get_app_pin(self.name)
    return utils.run_coroutine_threadsafe(self, coro)

def set_pin_thread(self, thread):
    """Set the thread this app is pinned to."""
    coro = self.AD.threading.set_pin_thread(self.name, thread)
    utils.run_coroutine_threadsafe(self, coro)

def get_pin_thread(self):
    """Return the thread this app is pinned to."""
    coro = self.AD.threading.get_pin_thread(self.name)
    return utils.run_coroutine_threadsafe(self, coro)
#
# Namespace
#
def set_namespace(self, namespace):
    """Set the app's default namespace for subsequent calls."""
    self._namespace = namespace

def get_namespace(self):
    """Return the app's current default namespace."""
    return self._namespace

def list_namespaces(self):
    """Return a list of all known namespaces."""
    coro = self.AD.state.list_namespaces()
    return utils.run_coroutine_threadsafe(self, coro)

def save_namespace(self, namespace):
    """Force a user-defined namespace to be flushed to disk."""
    coro = self.AD.state.save_namespace(namespace)
    utils.run_coroutine_threadsafe(self, coro)
#
# Utility
#
def get_app(self, name):
return utils.run_coroutine_threadsafe(self, self.AD.app_management.get_app(name))
def _check_entity(self, namespace, entity):
if "." not in entity:
raise ValueError(
"{}: Invalid entity ID: {}".format(self.name, entity))
if not utils.run_coroutine_threadsafe(self, self.AD.state.entity_exists(namespace, entity)):
self.logger.warning("%s: Entity %s not found in namespace %s", self.name, entity, namespace)
def get_ad_version(self):
return utils.__version__
def entity_exists(self, entity_id, **kwargs):
namespace = self._get_namespace(**kwargs)
return utils.run_coroutine_threadsafe(self, self.AD.state.entity_exists(namespace, entity_id))
def split_entity(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
return entity_id.split(".")
def split_device_list(self, list_):
return list_.split(",")
def get_plugin_config(self, **kwargs):
namespace = self._get_namespace(**kwargs)
return utils.run_coroutine_threadsafe(self, self.AD.plugins.get_plugin_meta(namespace))
def friendly_name(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
state = self.get_state(**kwargs)
if entity_id in state:
if "friendly_name" in state[entity_id]["attributes"]:
return state[entity_id]["attributes"]["friendly_name"]
else:
return entity_id
return None
#
# Apiai
#
@staticmethod
def get_apiai_intent(data):
    """Return the action from an API.AI request payload, or None."""
    if "result" not in data or "action" not in data["result"]:
        return None
    return data["result"]["action"]

@staticmethod
def get_apiai_slot_value(data, slot=None):
    """Return all parameters (slot=None) or one slot's value, or None."""
    if "result" not in data or "contexts" not in data["result"]:
        return None
    req = data.get('result')
    contexts = req.get('contexts', [{}])
    if contexts:
        parameters = contexts[0].get('parameters')
    else:
        parameters = req.get('parameters')
    if slot is None:
        return parameters
    return parameters[slot] if slot in parameters else None

@staticmethod
def format_apiai_response(speech=None):
    """Wrap speech text in the API.AI response format."""
    return {
        "speech": speech,
        "source": "Appdaemon",
        "displayText": speech
    }
#
# Alexa
#
@staticmethod
def format_alexa_response(speech=None, card=None, title=None):
    """Build an Alexa skill response dict from speech/card/title."""
    response = {"shouldEndSession": True}
    if speech is not None:
        response["outputSpeech"] = {"type": "PlainText", "text": speech}
    if card is not None:
        response["card"] = {"type": "Simple", "title": title, "content": card}
    return {
        "version": "1.0",
        "response": response,
        "sessionAttributes": {}
    }
#
# API
#
def register_endpoint(self, cb, name=None):
    """Register cb as an HTTP API endpoint (defaults to the app name).

    Returns a handle, or None (with a warning) when the HTTP component
    is not configured.
    """
    if name is None:
        ep = self.name
    else:
        ep = name
    if self.AD.http is not None:
        return utils.run_coroutine_threadsafe(self, self.AD.http.register_endpoint(cb, ep))
    else:
        # BUGFIX: message previously read "filed" instead of "failed".
        self.logger.warning("register_endpoint for %s failed - HTTP component is not configured", name)

def unregister_endpoint(self, handle):
    """Remove an endpoint previously added with register_endpoint()."""
    utils.run_coroutine_threadsafe(self, self.AD.http.unregister_endpoint(handle, self.name))
#
# State
#
def listen_state(self, cb, entity=None, **kwargs):
    """Register cb to fire on state changes of entity (or a whole domain)."""
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    if entity is not None and "." in entity:
        self._check_entity(namespace, entity)
    return utils.run_coroutine_threadsafe(self, self.AD.state.add_state_callback(self.name, namespace, entity, cb, kwargs))

def cancel_listen_state(self, handle):
    """Cancel a callback registered with listen_state()."""
    self.logger.debug("Canceling listen_state for %s", self.name)
    utils.run_coroutine_threadsafe(self, self.AD.state.cancel_state_callback(handle, self.name))

def info_listen_state(self, handle):
    """Return (namespace, entity, attribute, kwargs) for a state handle."""
    self.logger.debug("Calling info_listen_state for %s", self.name)
    return utils.run_coroutine_threadsafe(self, self.AD.state.info_state_callback(handle, self.name))

def get_state(self, entity_id=None, attribute=None, **kwargs):
    """Return state for one entity, a device domain, or all entities."""
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    return utils.run_coroutine_threadsafe(self, self.AD.state.get_state(self.name, namespace, entity_id, attribute, **kwargs))

def set_state(self, entity_id, **kwargs):
    """Set an entity's state in AppDaemon's state store."""
    self.logger.debug("set state: %s, %s", entity_id, kwargs)
    namespace = self._get_namespace(**kwargs)
    self._check_entity(namespace, entity_id)
    kwargs.pop("namespace", None)
    return utils.run_coroutine_threadsafe(self,
                                          self.AD.state.set_state(self.name, namespace, entity_id, **kwargs))
#
# Service
#
@staticmethod
def _check_service(service):
if service.find("/") == -1:
raise ValueError("Invalid Service Name: {}".format(service))
def call_service(self, service, **kwargs):
self._check_service(service)
d, s = service.split("/")
self.logger.debug("call_service: %s/%s, %s", d, s, kwargs)
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return utils.run_coroutine_threadsafe(self, self.AD.services.call_service(namespace, d, s, kwargs))
#
# Events
#
def listen_event(self, cb, event=None, **kwargs):
    """Register cb to fire when the named event (or any event) occurs."""
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    self.logger.debug("Calling listen_event for %s", self.name)
    return utils.run_coroutine_threadsafe(self, self.AD.events.add_event_callback(self.name, namespace, cb, event, **kwargs))

def cancel_listen_event(self, handle):
    """Cancel a callback registered with listen_event()."""
    self.logger.debug("Canceling listen_event for %s", self.name)
    utils.run_coroutine_threadsafe(self, self.AD.events.cancel_event_callback(self.name, handle))

def info_listen_event(self, handle):
    """Return info for an event callback handle."""
    self.logger.debug("Calling info_listen_event for %s", self.name)
    return utils.run_coroutine_threadsafe(self, self.AD.events.info_event_callback(self.name, handle))

def fire_event(self, event, **kwargs):
    """Fire an event with kwargs as its data in the target namespace."""
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    utils.run_coroutine_threadsafe(self, self.AD.events.fire_event(namespace, event, **kwargs))
#
# Time
#
def parse_utc_string(self, s):
    """Convert a UTC timestamp string into a local epoch float.

    The string is split on any non-digit characters; the final field
    (e.g. a trailing 'Z' or fractional part) is discarded.
    """
    # BUGFIX: the pattern is now a raw string; '[^\d]' in a plain string
    # is an invalid escape sequence (DeprecationWarning today, an error
    # in future Python versions).
    fields = re.split(r'[^\d]', s)[:-1]
    return datetime.datetime(*map(int, fields)).timestamp() + self.get_tz_offset() * 60

@staticmethod
def get_tz_offset():
    """Return the local UTC offset in minutes."""
    utc_offset_min = int(round(
        (datetime.datetime.now()
         - datetime.datetime.utcnow()).total_seconds())
    ) / 60  # round for taking time twice
    utc_offset_h = utc_offset_min / 60
    # we do not handle 1/2 h timezone offsets
    assert utc_offset_min == utc_offset_h * 60
    return utc_offset_min

@staticmethod
def convert_utc(utc):
    """Parse an ISO 8601 string into an aware datetime (via iso8601)."""
    return iso8601.parse_date(utc)
def sun_up(self):
    """Return True if the sun is above the horizon."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.sun_up())

def sun_down(self):
    """Return True if the sun is below the horizon."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.sun_down())

def parse_time(self, time_str, name=None, aware=False):
    """Parse a time string into a datetime.time."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.parse_time(time_str, name, aware))

def parse_datetime(self, time_str, name=None, aware=False):
    """Parse a datetime string into a datetime.datetime."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.parse_datetime(time_str, name, aware))

def get_now(self):
    """Return the scheduler's current aware datetime."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now())

def get_now_ts(self):
    """Return the scheduler's current time as a Unix timestamp."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now_ts())

def now_is_between(self, start_time_str, end_time_str, name=None):
    """Return True if the current time lies between the two time specs."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.now_is_between(start_time_str, end_time_str, name))

def sunrise(self, aware=False):
    """Return the time of the next sunrise."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.sunrise(aware))

def sunset(self, aware=False):
    """Return the time of the next sunset."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.sunset(aware))

def time(self):
    """Return the current local time-of-day."""
    now = utils.run_coroutine_threadsafe(self, self.AD.sched.get_now())
    return now.astimezone(self.AD.tz).time()

def datetime(self, aware=False):
    """Return the current local datetime (aware or naive)."""
    if aware is True:
        now = utils.run_coroutine_threadsafe(self, self.AD.sched.get_now())
        return now.astimezone(self.AD.tz)
    return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now_naive())

def date(self):
    """Return today's local date."""
    now = utils.run_coroutine_threadsafe(self, self.AD.sched.get_now())
    return now.astimezone(self.AD.tz).date()

def get_timezone(self):
    """Return the configured time zone name."""
    return self.AD.time_zone
#
# Scheduler
#
def cancel_timer(self, handle):
    """Cancel a scheduled callback."""
    utils.run_coroutine_threadsafe(self, self.AD.sched.cancel_timer(self.name, handle))

def info_timer(self, handle):
    """Return scheduling info for a timer handle."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.info_timer(handle, self.name))

def run_in(self, callback, seconds, **kwargs):
    """Run callback once, `seconds` seconds from now."""
    self.logger.debug("Registering run_in in %s seconds for %s", seconds, self.name)
    # convert seconds to an int if possible since a common pattern is to
    # pass this through from the config file which is a string
    exec_time = self.get_now() + timedelta(seconds=int(seconds))
    return utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
        self.name, exec_time, callback, False, None, **kwargs
    ))
def run_once(self, callback, start, **kwargs):
    """Run callback once, at the next occurrence of the given time of day.

    start may be a datetime.time or a parseable time string; raises
    ValueError for any other type.
    """
    if type(start) == datetime.time:
        when = start
    elif type(start) == str:
        when = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name, True))["datetime"].time()
    else:
        raise ValueError("Invalid type for start")
    name = self.name
    now = self.get_now()
    today = now.date()
    event = datetime.datetime.combine(today, when)
    # BUGFIX: combine() yields a naive datetime while get_now() is aware;
    # comparing them raised TypeError. Also, event.timestamp() (a float)
    # was passed to insert_schedule() while every sibling (run_in, run_at,
    # run_every) passes a datetime — convert to aware and pass that.
    aware_event = self.AD.sched.convert_naive(event)
    if aware_event < now:
        aware_event = aware_event + datetime.timedelta(days=1)
    handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
        name, aware_event, callback, False, None, **kwargs
    ))
    return handle
def run_at(self, callback, start, **kwargs):
    """Run callback once at an absolute datetime (or parseable string)."""
    if type(start) == datetime.datetime:
        when = start
    elif type(start) == str:
        when = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name))["datetime"]
    else:
        raise ValueError("Invalid type for start")
    aware_when = self.AD.sched.convert_naive(when)
    now = self.get_now()
    if aware_when < now:
        raise ValueError(
            "{}: run_at() Start time must be "
            "in the future".format(self.name)
        )
    return utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
        self.name, aware_when, callback, False, None, **kwargs
    ))
def run_daily(self, callback, start, **kwargs):
    """Run callback every day at the given time (or sunrise/sunset spec).

    start is a datetime.time or a string; strings may be plain times or
    sunrise/sunset specs, in which case scheduling is delegated to
    run_at_sunrise()/run_at_sunset() with the parsed offset.
    """
    info = None
    when = None
    if type(start) == datetime.time:
        when = start
    elif type(start) == str:
        info = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name))
    else:
        raise ValueError("Invalid type for start")
    if info is None or info["sun"] is None:
        # Plain time of day: schedule a 24h repeating timer starting at
        # the next occurrence of `when`.
        if when is None:
            when = info["datetime"].time()
        aware_now = self.get_now()
        now = self.AD.sched.make_naive(aware_now)
        today = now.date()
        event = datetime.datetime.combine(today, when)
        if event < now:
            event = event + datetime.timedelta(days=1)
        handle = self.run_every(callback, event, 24 * 60 * 60, **kwargs)
    elif info["sun"] == "sunrise":
        kwargs["offset"] = info["offset"]
        handle = self.run_at_sunrise(callback, **kwargs)
    else:
        # Anything else parsed with a "sun" key is treated as sunset.
        kwargs["offset"] = info["offset"]
        handle = self.run_at_sunset(callback, **kwargs)
    return handle
def run_hourly(self, callback, start, **kwargs):
    """Run callback every hour, at start's minute/second (or 1h from now)."""
    now = self.get_now()
    if start is None:
        event = now + datetime.timedelta(hours=1)
    else:
        event = now.replace(minute=start.minute, second=start.second)
        if event < now:
            event = event + datetime.timedelta(hours=1)
    return self.run_every(callback, event, 60 * 60, **kwargs)

def run_minutely(self, callback, start, **kwargs):
    """Run callback every minute, at start's second (or 1 min from now)."""
    now = self.get_now()
    if start is None:
        event = now + datetime.timedelta(minutes=1)
    else:
        event = now.replace(second=start.second)
        if event < now:
            event = event + datetime.timedelta(minutes=1)
    return self.run_every(callback, event, 60, **kwargs)
def run_every(self, callback, start, interval, **kwargs):
    """Run callback every `interval` seconds, starting at `start`."""
    now = self.get_now()
    aware_start = self.AD.sched.convert_naive(start)
    if aware_start < now:
        raise ValueError("start cannot be in the past")
    self.logger.debug("Registering run_every starting %s in %ss intervals for %s", aware_start, interval, self.name)
    return utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(self.name, aware_start, callback, True, None,
                                                                              interval=interval, **kwargs))
def _schedule_sun(self, name, type_, callback, **kwargs):
    """Insert a repeating schedule keyed to the next sunrise or sunset."""
    if type_ == "next_rising":
        event = self.AD.sched.next_sunrise()
    else:
        event = self.AD.sched.next_sunset()
    return utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
        name, event, callback, True, type_, **kwargs
    ))

def run_at_sunset(self, callback, **kwargs):
    """Run callback every day at sunset (plus optional offset kwarg)."""
    self.logger.debug("Registering run_at_sunset with kwargs = %s for %s", kwargs, self.name)
    return self._schedule_sun(self.name, "next_setting", callback, **kwargs)

def run_at_sunrise(self, callback, **kwargs):
    """Run callback every day at sunrise (plus optional offset kwarg)."""
    self.logger.debug("Registering run_at_sunrise with kwargs = %s for %s", kwargs, self.name)
    return self._schedule_sun(self.name, "next_rising", callback, **kwargs)
#
# Dashboard
#
def dash_navigate(self, target, timeout=-1, ret=None, sticky=0):
    """Ask HADashboard to navigate to `target` (optionally timed/sticky)."""
    args = {"command": "navigate", "target": target, "sticky": sticky}
    if timeout != -1:
        args["timeout"] = timeout
    if ret is not None:
        args["return"] = ret
    self.fire_event("__HADASHBOARD_EVENT", **args)
#
# Other
#
def run_in_thread(self, callback, thread):
    """Run callback immediately, pinned to a specific worker thread."""
    self.run_in(callback, 0, pin=False, pin_thread=thread)

def get_thread_info(self):
    """Return info about AppDaemon's worker threads."""
    return utils.run_coroutine_threadsafe(self, self.AD.threading.get_thread_info())

def get_scheduler_entries(self):
    """Return all current scheduler entries."""
    return utils.run_coroutine_threadsafe(self, self.AD.sched.get_scheduler_entries())

def get_callback_entries(self):
    """Return all registered callbacks."""
    return utils.run_coroutine_threadsafe(self, self.AD.callbacks.get_callback_entries())
@staticmethod
def get_alexa_slot_value(data, slot=None):
    """Return all Alexa slots (slot=None) or one slot's value, or None."""
    if "request" not in data or \
            "intent" not in data["request"] or \
            "slots" not in data["request"]["intent"]:
        return None
    slots = data["request"]["intent"]["slots"]
    if slot is None:
        return slots
    if slot in slots and "value" in slots[slot]:
        return slots[slot]["value"]
    return None

@staticmethod
def get_alexa_error(data):
    """Return the error message from an Alexa request, or None."""
    if "request" in data and "err" in data["request"] and "message" in data["request"]["err"]:
        return data["request"]["err"]["message"]
    return None

@staticmethod
def get_alexa_intent(data):
    """Return the intent name from an Alexa request, or None."""
    if "request" in data and "intent" in data["request"] and "name" in data["request"]["intent"]:
        return data["request"]["intent"]["name"]
    return None
<|code_end|>
appdaemon/state.py
<|code_start|>import uuid
import traceback
import os
from copy import copy, deepcopy
import datetime
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class State:
    """AppDaemon's central state store, keyed by namespace then entity."""

    def __init__(self, ad: AppDaemon):
        self.AD = ad
        # namespace -> {entity_id -> state dict}
        self.state = {}
        self.state["default"] = {}
        self.state["admin"] = {}
        self.logger = ad.logging.get_child("_state")
        # Initialize User Defined Namespaces
        nspath = os.path.join(self.AD.config_dir, "namespaces")
        try:
            if not os.path.isdir(nspath):
                os.makedirs(nspath)
            for ns in self.AD.namespaces:
                self.logger.info("User Defined Namespace '%s' initialized", ns)
                writeback = "safe"
                if "writeback" in self.AD.namespaces[ns]:
                    writeback = self.AD.namespaces[ns]["writeback"]
                # "safe" writeback flushes on every change.
                safe = writeback == "safe"
                self.state[ns] = utils.PersistentDict(os.path.join(nspath, ns), safe)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallows
            # SystemExit/KeyboardInterrupt during startup.
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in namespace setup")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
async def list_namespaces(self):
    """Return the names of all known namespaces."""
    return list(self.state)
def list_namespace_entities(self, namespace):
    """Return the entity IDs in a namespace, or None if it is unknown."""
    if namespace not in self.state:
        return None
    return list(self.state[namespace])
def terminate(self):
    """Flush all namespaces to disk at shutdown."""
    self.logger.debug("terminate() called for state")
    self.logger.info("Saving all namespaces")
    self.save_all_namespaces()
async def add_state_callback(self, name, namespace, entity, cb, kwargs):
    """Register a state-change callback for an app.

    Returns the new callback handle, or None when the app's pin
    settings reject the registration (validate_pin failed).
    """
    if self.AD.threading.validate_pin(name, kwargs) is True:
        # Work out pinning; an explicit pin_thread implies pinning.
        if "pin" in kwargs:
            pin_app = kwargs["pin"]
        else:
            pin_app = self.AD.app_management.objects[name]["pin_app"]
        if "pin_thread" in kwargs:
            pin_thread = kwargs["pin_thread"]
            pin_app = True
        else:
            pin_thread = self.AD.app_management.objects[name]["pin_thread"]
        if name not in self.AD.callbacks.callbacks:
            self.AD.callbacks.callbacks[name] = {}
        handle = uuid.uuid4().hex
        self.AD.callbacks.callbacks[name][handle] = {
            "name": name,
            "id": self.AD.app_management.objects[name]["id"],
            "type": "state",
            "function": cb,
            "entity": entity,
            "namespace": namespace,
            "pin_app": pin_app,
            "pin_thread": pin_thread,
            "kwargs": kwargs
        }
        #
        # In the case of a quick_start parameter,
        # start the clock immediately if the device is already in the new state
        #
        if "immediate" in kwargs and kwargs["immediate"] is True:
            if entity is not None and "new" in kwargs and "duration" in kwargs:
                # NOTE(review): raises KeyError if `entity` is not present in
                # the namespace at registration time — confirm callers
                # guarantee existence.
                if self.state[namespace][entity]["state"] == kwargs["new"]:
                    exec_time = await self.AD.sched.get_now_ts() + int(kwargs["duration"])
                    kwargs["__duration"] = await self.AD.sched.insert_schedule(
                        name, exec_time, cb, False, None,
                        __entity=entity,
                        __attribute=None,
                        __old_state=None,
                        __new_state=kwargs["new"], **kwargs
                    )
        # Mirror the callback as an admin-namespace entity for introspection.
        await self.AD.state.add_entity("admin", "state_callback.{}".format(handle), "active",
                                       {"app": name, "listened_entity": entity, "function": cb.__name__,
                                        "pinned": pin_app, "pinned_thread": pin_thread, "fired": 0, "executed":0, "kwargs": kwargs})
        return handle
    else:
        return None
async def cancel_state_callback(self, handle, name):
    """Remove a state callback and its mirrored admin-namespace entity."""
    callbacks = self.AD.callbacks.callbacks
    if name not in callbacks or handle not in callbacks[name]:
        self.logger.warning("Invalid callback in cancel_state_callback() from app {}".format(name))
    if name in callbacks and handle in callbacks[name]:
        del callbacks[name][handle]
        await self.AD.state.remove_entity("admin",
                                          "state_callback.{}".format(handle))
    if name in callbacks and callbacks[name] == {}:
        del callbacks[name]
async def info_state_callback(self, handle, name):
    """Return (namespace, entity, attribute, kwargs) for a callback handle.

    Raises ValueError for unknown handles.
    """
    callbacks = self.AD.callbacks.callbacks
    if name not in callbacks or handle not in callbacks[name]:
        raise ValueError("Invalid handle: {}".format(handle))
    callback = callbacks[name][handle]
    app = self.AD.app_management.objects[name]["object"]
    return (
        callback["namespace"],
        callback["entity"],
        callback["kwargs"].get("attribute", None),
        self.sanitize_state_kwargs(app, callback["kwargs"])
    )
async def process_state_callbacks(self, namespace, state):
    """Dispatch a state-change event to all matching state callbacks.

    `state` is the event wrapper; its "data" key holds the entity_id
    plus old and new state. Callbacks registered for the whole
    namespace, a device domain, or the specific entity are all
    considered; "global" namespaces match everything.
    """
    data = state["data"]
    entity_id = data['entity_id']
    self.logger.debug(data)
    device, entity = entity_id.split(".")
    # Process state callbacks
    removes = []
    for name in self.AD.callbacks.callbacks.keys():
        for uuid_ in self.AD.callbacks.callbacks[name]:
            callback = self.AD.callbacks.callbacks[name][uuid_]
            if callback["type"] == "state" and (callback["namespace"] == namespace or callback[
                    "namespace"] == "global" or namespace == "global"):
                # cdevice/centity describe what the callback listens to:
                # None/None = everything, domain/None = whole domain,
                # domain/entity = one entity.
                cdevice = None
                centity = None
                if callback["entity"] is not None:
                    if "." not in callback["entity"]:
                        cdevice = callback["entity"]
                        centity = None
                    else:
                        cdevice, centity = callback["entity"].split(".")
                if callback["kwargs"].get("attribute") is None:
                    cattribute = "state"
                else:
                    cattribute = callback["kwargs"].get("attribute")
                cold = callback["kwargs"].get("old")
                cnew = callback["kwargs"].get("new")
                executed = False
                if cdevice is None:
                    # Callback listens to everything in the namespace.
                    executed = await self.AD.threading.check_and_dispatch_state(
                        name, callback["function"], entity_id,
                        cattribute,
                        data['new_state'],
                        data['old_state'],
                        cold, cnew,
                        callback["kwargs"],
                        uuid_,
                        callback["pin_app"],
                        callback["pin_thread"]
                    )
                elif centity is None:
                    # Callback listens to a whole device domain.
                    if device == cdevice:
                        executed = await self.AD.threading.check_and_dispatch_state(
                            name, callback["function"], entity_id,
                            cattribute,
                            data['new_state'],
                            data['old_state'],
                            cold, cnew,
                            callback["kwargs"],
                            uuid_,
                            callback["pin_app"],
                            callback["pin_thread"]
                        )
                elif device == cdevice and entity == centity:
                    # Callback listens to this exact entity.
                    executed = await self.AD.threading.check_and_dispatch_state(
                        name, callback["function"], entity_id,
                        cattribute,
                        data['new_state'],
                        data['old_state'], cold,
                        cnew,
                        callback["kwargs"],
                        uuid_,
                        callback["pin_app"],
                        callback["pin_thread"]
                    )
                # Remove the callback if appropriate
                if executed is True:
                    remove = callback["kwargs"].get("oneshot", False)
                    if remove is True:
                        # Defer removal: the callbacks dicts are still
                        # being iterated here.
                        removes.append({"name": callback["name"], "uuid": uuid_})
    for remove in removes:
        await self.cancel_state_callback(remove["uuid"], remove["name"])
async def entity_exists(self, namespace, entity):
if namespace in self.state and entity in self.state[namespace]:
return True
else:
return False
def get_entity(self, namespace = None, entity_id = None):
if namespace is None:
return self.state
elif entity_id is None:
if namespace in self.state:
return self.state[namespace]
else:
self.logger.warning("Unknown namespace: %s", namespace)
elif namespace in self.state:
if entity_id in self.state[namespace]:
return self.state[namespace][entity_id]
else:
self.logger.warning("Unknown namespace: %s", namespace)
return None
async def remove_entity(self, namespace, entity):
if entity in self.state[namespace]:
self.state[namespace].pop(entity)
data = \
{
"event_type": "__AD_ENTITY_REMOVED",
"data":
{
"entity_id": entity,
}
}
await self.AD.events.process_event(namespace, data)
async def add_entity(self, namespace, entity, state, attributes = None):
if attributes is None:
attrs = {}
else:
attrs = attributes
state = {"state": state, "last_changed": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)), "attributes": attrs}
self.state[namespace][entity] = state
data = \
{
"event_type": "__AD_ENTITY_ADDED",
"data":
{
"entity_id": entity,
"state": state,
}
}
await self.AD.events.process_event(namespace, data)
async def get_state(self, name, namespace, entity_id=None, attribute=None):
self.logger.debug("get_state: %s.%s", entity_id, attribute)
device = None
entity = None
if entity_id is not None and "." in entity_id:
if not await self.entity_exists(namespace, entity_id):
return None
if entity_id is not None:
if "." not in entity_id:
if attribute is not None:
raise ValueError(
"{}: Invalid entity ID: {}".format(name, entity))
device = entity_id
entity = None
else:
device, entity = entity_id.split(".")
if device is None:
return deepcopy(dict(self.state[namespace]))
elif entity is None:
devices = {}
for entity_id in self.state[namespace].keys():
thisdevice, thisentity = entity_id.split(".")
if device == thisdevice:
devices[entity_id] = self.state[namespace][entity_id]
return deepcopy(devices)
elif attribute is None:
entity_id = "{}.{}".format(device, entity)
if entity_id in self.state[namespace] and "state" in self.state[namespace][entity_id]:
return deepcopy(self.state[namespace][entity_id]["state"])
else:
return None
else:
entity_id = "{}.{}".format(device, entity)
if attribute == "all":
if entity_id in self.state[namespace]:
return deepcopy(self.state[namespace][entity_id])
else:
return None
else:
if namespace in self.state and entity_id in self.state[namespace]:
if attribute in self.state[namespace][entity_id]["attributes"]:
return deepcopy(self.state[namespace][entity_id]["attributes"][
attribute])
elif attribute in self.state[namespace][entity_id]:
return deepcopy(self.state[namespace][entity_id][attribute])
else:
return None
else:
return None
    def parse_state(self, entity_id, namespace, **kwargs):
        """Build the state record for set_state() from keyword arguments.

        NOTE(review): when the entity already exists this returns (and
        mutates) the stored record itself, not a copy - confirm that callers
        rely on re-assigning the result back into self.state.
        """
        self.logger.debug("parse_state: %s, %s", entity_id, kwargs)

        if entity_id in self.state[namespace]:
            new_state = self.state[namespace][entity_id]
        else:
            # Its a new state entry
            new_state = {}
            new_state["attributes"] = {}

        if "state" in kwargs:
            new_state["state"] = kwargs["state"]
            del kwargs["state"]

        if "attributes" in kwargs and kwargs.get('replace', False):
            # replace=True swaps the attributes dict wholesale
            new_state["attributes"] = kwargs["attributes"]
        else:
            if "attributes" in kwargs:
                new_state["attributes"].update(kwargs["attributes"])
            else:
                # Any remaining kwargs are treated as individual attributes
                if "replace" in kwargs:
                    del kwargs["replace"]
                new_state["attributes"].update(kwargs)

        return new_state
async def add_to_state(self, name, namespace, entity_id, i):
value = await self.get_state(name, namespace, entity_id)
if value is not None:
value += i
await self.set_state(name, namespace, entity_id, state=value)
async def add_to_attr(self, name, namespace, entity_id, attr, i):
state = await self.get_state(name, namespace, entity_id, attribute="all")
if state is not None:
state["attributes"][attr] = copy(state["attributes"][attr]) + i
await self.set_state(name, namespace, entity_id, attributes=state["attributes"])
    def set_state_simple(self, namespace, entity_id, state):
        # Overwrite the stored record directly: no event is fired and no
        # plugin is notified (unlike set_state()).
        self.state[namespace][entity_id] = state
    async def set_state(self, name, namespace, entity_id, **kwargs):
        """Set an entity's state/attributes on behalf of app ``name``.

        Routes through the namespace's plugin when it implements
        set_plugin_state (the change is expected to come back via the
        plugin's event stream); otherwise updates the local store and fires a
        state_changed event.  Returns the new state record.
        """
        self.logger.debug("set_state(): %s, %s", entity_id, kwargs)
        if entity_id in self.state[namespace]:
            # Snapshot before parse_state(), which mutates the stored record
            old_state = deepcopy(self.state[namespace][entity_id])
        else:
            old_state = {"state": None, "attributes": {}}
        new_state = self.parse_state(entity_id, namespace, **kwargs)
        new_state["last_changed"] = utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz)
        self.logger.debug("Old state: %s", old_state)
        self.logger.debug("New state: %s", new_state)
        if not await self.AD.state.entity_exists(namespace, entity_id):
            self.logger.info("%s: Entity %s created in namespace: %s", name, entity_id, namespace)

        # Fire the plugin's state update if it has one
        plugin = await self.AD.plugins.get_plugin_object(namespace)
        if hasattr(plugin, "set_plugin_state"):
            # We assume that the state change will come back to us via the plugin
            self.logger.debug("sending event to plugin")
            result = await plugin.set_plugin_state(namespace, entity_id, **kwargs)
            if result is not None:
                if "entity_id" in result:
                    result.pop("entity_id")
                self.state[namespace][entity_id] = self.parse_state(entity_id, namespace, **result)
        else:
            # Set the state locally
            self.state[namespace][entity_id] = new_state
            # Fire the event locally
            self.logger.debug("sending event locally")
            data = \
                {
                    "event_type": "state_changed",
                    "data":
                        {
                            "entity_id": entity_id,
                            "new_state": new_state,
                            "old_state": old_state
                        }
                }

            await self.AD.events.process_event(namespace, data)

        return new_state
    def set_namespace_state(self, namespace, state):
        # Replace the entire contents of the namespace with ``state``.
        self.state[namespace] = state
    def update_namespace_state(self, namespace, state):
        # Merge ``state`` into the namespace, overwriting existing keys.
        self.state[namespace].update(state)
    async def save_namespace(self, namespace):
        # Persist one namespace to disk.
        # NOTE(review): assumes the namespace is backed by an object with a
        # save() method (user-defined namespaces); plain dict namespaces such
        # as "default"/"admin" would raise here - confirm callers.
        self.state[namespace].save()
    def save_all_namespaces(self):
        # Persist every user-defined namespace (only those appear in
        # self.AD.namespaces and are disk-backed).
        for ns in self.AD.namespaces:
            self.state[ns].save()
    def save_hybrid_namespaces(self):
        # Persist only namespaces configured with writeback == "hybrid".
        for ns in self.AD.namespaces:
            if self.AD.namespaces[ns]["writeback"] == "hybrid":
                self.state[ns].save()
#
# Utilities
#
def sanitize_state_kwargs(self, app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, [
"old", "new", "__attribute", "duration", "state",
"__entity", "__duration", "__old_state", "__new_state",
"oneshot", "pin_app", "pin_thread", "__delay"
] + app.list_constraints())
<|code_end|>
| appdaemon/adapi.py
<|code_start|>import datetime
import inspect
import iso8601
import re
from datetime import timedelta
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class ADAPI:
    """Base API facade handed to every AppDaemon app.

    A thin synchronous wrapper over the AppDaemon core object (``self.AD``):
    most public methods marshal the corresponding core coroutine onto the
    main event loop with ``utils.run_coroutine_threadsafe`` and return its
    result.
    """

    #
    # Internal
    #

    def __init__(self, ad: AppDaemon, name, logging_obj, args, config, app_config, global_vars):

        # Store args

        self.AD = ad
        self.name = name
        self._logging = logging_obj
        self.config = config
        self.app_config = app_config
        self.args = args
        self.global_vars = global_vars
        self._namespace = "default"
        self.logger = self._logging.get_child(name)
        self.err = self._logging.get_error().getChild(name)
        self.user_logs = {}
        if "log_level" in args:
            self.logger.setLevel(args["log_level"])
            self.err.setLevel(args["log_level"])
        if "log" in args:
            # Optionally route this app's main log to a user-defined log
            userlog = self.get_user_log(args["log"])
            if userlog is not None:
                self.logger = userlog

    @staticmethod
    def _sub_stack(msg):
        # Substitute __module__/__line__/__function__ placeholders in log
        # messages with the calling app's frame info (two frames up).
        # If msg is a data structure of some type, don't sub
        if type(msg) is str:
            stack = inspect.stack()
            if msg.find("__module__") != -1:
                msg = msg.replace("__module__", stack[2][1])
            if msg.find("__line__") != -1:
                msg = msg.replace("__line__", str(stack[2][2]))
            if msg.find("__function__") != -1:
                msg = msg.replace("__function__", stack[2][3])
        return msg

    def _get_namespace(self, **kwargs):
        # A "namespace" kwarg overrides the app's current namespace.
        # NOTE: **kwargs repacks into a fresh dict, so the del below does not
        # affect the caller's dict - callers delete "namespace" themselves.
        if "namespace" in kwargs:
            namespace = kwargs["namespace"]
            del kwargs["namespace"]
        else:
            namespace = self._namespace

        return namespace

    #
    # Logging
    #

    def _log(self, logger, msg, *args, **kwargs):
        """Common log path: placeholder substitution, level selection and
        optional ASCII re-encoding."""
        msg = self._sub_stack(msg)
        if "level" in kwargs:
            level = kwargs.get("level", "INFO")
            kwargs.pop("level")
        else:
            level = "INFO"
        ascii_encode = kwargs.get("ascii_encode", True)
        if ascii_encode is True:
            # Replace non-ASCII characters unless explicitly disabled
            safe_enc = lambda s: str(s).encode("utf-8", "replace").decode("ascii", "replace")
            msg = safe_enc(msg)
        # NOTE(review): "ascii_encode" is read but never popped, so if a
        # caller passes it, it is forwarded to logger.log() - verify.
        logger.log(self._logging.log_levels[level], msg, *args, **kwargs)

    def log(self, msg, *args, **kwargs):
        if "log" in kwargs:
            # Its a user defined log
            logger = self.get_user_log(kwargs["log"])
            kwargs.pop("log")
        else:
            logger = self.logger
        self._log(logger, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self._log(self.err, msg, *args, **kwargs)

    def listen_log(self, cb, level="INFO", **kwargs):
        namespace = self._get_namespace(**kwargs)
        if "namespace" in kwargs:
            del kwargs["namespace"]

        return utils.run_coroutine_threadsafe(self, self.AD.logging.add_log_callback(namespace, self.name, cb, level, **kwargs))

    def cancel_listen_log(self, handle):
        self.logger.debug("Canceling listen_log for %s", self.name)
        utils.run_coroutine_threadsafe(self, self.AD.logging.cancel_log_callback(self.name, handle))

    def get_main_log(self):
        return self.logger

    def get_error_log(self):
        return self.err

    def get_user_log(self, log):
        """Return (and cache) the child logger for user log ``log``, or None
        when the log is not configured."""
        logger = None
        if log in self.user_logs:
            # Did we use it already?
            logger = self.user_logs[log]
        else:
            # Build it on the fly
            parent = self.AD.logging.get_user_log(self, log)
            if parent is not None:
                logger = parent.getChild(self.name)
                self.user_logs[log] = logger
                if "log_level" in self.args:
                    logger.setLevel(self.args["log_level"])
        return logger

    def set_log_level(self, level):
        # Apply the level to the main, error and all user logs
        self.logger.setLevel(self._logging.log_levels[level])
        self.err.setLevel(self._logging.log_levels[level])
        for log in self.user_logs:
            self.user_logs[log].setLevel(self._logging.log_levels[level])

    def set_error_level(self, level):
        self.err.setLevel(self._logging.log_levels[level])

    #
    # Threading
    #

    def set_app_pin(self, pin):
        utils.run_coroutine_threadsafe(self, self.AD.threading.set_app_pin(self.name, pin))

    def get_app_pin(self):
        return utils.run_coroutine_threadsafe(self, self.AD.threading.get_app_pin(self.name))

    def set_pin_thread(self, thread):
        utils.run_coroutine_threadsafe(self, self.AD.threading.set_pin_thread(self.name, thread))

    def get_pin_thread(self):
        return utils.run_coroutine_threadsafe(self, self.AD.threading.get_pin_thread(self.name))

    #
    # Namespace
    #

    def set_namespace(self, namespace):
        self._namespace = namespace

    def get_namespace(self):
        return self._namespace

    def list_namespaces(self):
        return utils.run_coroutine_threadsafe(self, self.AD.state.list_namespaces())

    def save_namespace(self, namespace):
        utils.run_coroutine_threadsafe(self, self.AD.state.save_namespace(namespace))

    #
    # Utility
    #

    def get_app(self, name):
        return utils.run_coroutine_threadsafe(self, self.AD.app_management.get_app(name))

    def _check_entity(self, namespace, entity):
        # Raise on a malformed id; only warn when the entity is unknown
        if "." not in entity:
            raise ValueError(
                "{}: Invalid entity ID: {}".format(self.name, entity))
        if not utils.run_coroutine_threadsafe(self, self.AD.state.entity_exists(namespace, entity)):
            self.logger.warning("%s: Entity %s not found in namespace %s", self.name, entity, namespace)

    def get_ad_version(self):
        return utils.__version__

    def entity_exists(self, entity_id, **kwargs):
        namespace = self._get_namespace(**kwargs)
        return utils.run_coroutine_threadsafe(self, self.AD.state.entity_exists(namespace, entity_id))

    def split_entity(self, entity_id, **kwargs):
        self._check_entity(self._get_namespace(**kwargs), entity_id)
        return entity_id.split(".")

    def split_device_list(self, list_):
        return list_.split(",")

    def get_plugin_config(self, **kwargs):
        namespace = self._get_namespace(**kwargs)
        return utils.run_coroutine_threadsafe(self, self.AD.plugins.get_plugin_meta(namespace))

    def friendly_name(self, entity_id, **kwargs):
        """Return the entity's friendly_name attribute, the entity_id itself
        when it has none, or None when the entity is unknown."""
        self._check_entity(self._get_namespace(**kwargs), entity_id)
        # NOTE(review): fetches the whole namespace (get_state is called
        # without entity_id) and then indexes it - confirm this is intended.
        state = self.get_state(**kwargs)
        if entity_id in state:
            if "friendly_name" in state[entity_id]["attributes"]:
                return state[entity_id]["attributes"]["friendly_name"]
            else:
                return entity_id
        return None

    #
    # Apiai
    #

    @staticmethod
    def get_apiai_intent(data):
        # Extract the action name from an api.ai/Dialogflow request payload
        if "result" in data and "action" in data["result"]:
            return data["result"]["action"]
        else:
            return None

    @staticmethod
    def get_apiai_slot_value(data, slot=None):
        """Return one slot value, or the whole parameter dict when ``slot``
        is None; None when the request has no result/contexts."""
        if "result" in data and \
                "contexts" in data["result"]:
            req = data.get('result')
            contexts = req.get('contexts', [{}])
            if contexts:
                parameters = contexts[0].get('parameters')
            else:
                parameters = req.get('parameters')
            if slot is None:
                return parameters
            else:
                if slot in parameters:
                    return parameters[slot]
                else:
                    return None
        else:
            return None

    @staticmethod
    def format_apiai_response(speech=None):
        speech = \
            {
                "speech": speech,
                "source": "Appdaemon",
                "displayText": speech
            }

        return speech

    #
    # Alexa
    #

    @staticmethod
    def format_alexa_response(speech=None, card=None, title=None):
        # Build a minimal Alexa skill response; the session always ends.
        response = \
            {
                "shouldEndSession": True
            }

        if speech is not None:
            response["outputSpeech"] = \
                {
                    "type": "PlainText",
                    "text": speech
                }

        if card is not None:
            response["card"] = \
                {
                    "type": "Simple",
                    "title": title,
                    "content": card
                }

        speech = \
            {
                "version": "1.0",
                "response": response,
                "sessionAttributes": {}
            }

        return speech

    #
    # API
    #

    def register_endpoint(self, cb, name=None):
        # Expose ``cb`` on AppDaemon's HTTP API; endpoint defaults to the
        # app's name.
        if name is None:
            ep = self.name
        else:
            ep = name

        if self.AD.http is not None:
            return utils.run_coroutine_threadsafe(self, self.AD.http.register_endpoint(cb, ep))
        else:
            # NOTE(review): "filed" looks like a typo for "failed" in this
            # warning text.
            self.logger.warning("register_endpoint for %s filed - HTTP component is not configured", name)

    def unregister_endpoint(self, handle):
        utils.run_coroutine_threadsafe(self, self.AD.http.unregister_endpoint(handle, self.name))

    #
    # State
    #

    def listen_state(self, cb, entity=None, **kwargs):
        namespace = self._get_namespace(**kwargs)
        if "namespace" in kwargs:
            del kwargs["namespace"]
        name = self.name
        # Only validate fully-qualified entity ids; bare device names are
        # legal wildcards here.
        if entity is not None and "." in entity:
            self._check_entity(namespace, entity)
        return utils.run_coroutine_threadsafe(self, self.AD.state.add_state_callback(name, namespace, entity, cb, kwargs))

    def cancel_listen_state(self, handle):
        self.logger.debug("Canceling listen_state for %s", self.name)
        utils.run_coroutine_threadsafe(self, self.AD.state.cancel_state_callback(handle, self.name))

    def info_listen_state(self, handle):
        self.logger.debug("Calling info_listen_state for %s", self.name)
        return utils.run_coroutine_threadsafe(self, self.AD.state.info_state_callback(handle, self.name))

    def get_state(self, entity_id=None, attribute=None, default=None, copy=True, **kwargs):
        namespace = self._get_namespace(**kwargs)
        if "namespace" in kwargs:
            del kwargs["namespace"]
        return utils.run_coroutine_threadsafe(self, self.AD.state.get_state(
            self.name, namespace, entity_id, attribute, default, copy, **kwargs
        ))

    def set_state(self, entity_id, **kwargs):
        self.logger.debug("set state: %s, %s", entity_id, kwargs)
        namespace = self._get_namespace(**kwargs)
        self._check_entity(namespace, entity_id)
        if "namespace" in kwargs:
            del kwargs["namespace"]

        return utils.run_coroutine_threadsafe(self,
                                              self.AD.state.set_state(self.name, namespace, entity_id, **kwargs))

    #
    # Service
    #

    @staticmethod
    def _check_service(service):
        # Services are addressed as "domain/service"
        if service.find("/") == -1:
            raise ValueError("Invalid Service Name: {}".format(service))

    def call_service(self, service, **kwargs):
        self._check_service(service)
        d, s = service.split("/")
        self.logger.debug("call_service: %s/%s, %s", d, s, kwargs)
        namespace = self._get_namespace(**kwargs)
        if "namespace" in kwargs:
            del kwargs["namespace"]
        return utils.run_coroutine_threadsafe(self, self.AD.services.call_service(namespace, d, s, kwargs))

    #
    # Events
    #

    def listen_event(self, cb, event=None, **kwargs):
        namespace = self._get_namespace(**kwargs)
        if "namespace" in kwargs:
            del kwargs["namespace"]
        _name = self.name
        self.logger.debug("Calling listen_event for %s", self.name)
        return utils.run_coroutine_threadsafe(self, self.AD.events.add_event_callback(_name, namespace, cb, event, **kwargs))

    def cancel_listen_event(self, handle):
        self.logger.debug("Canceling listen_event for %s", self.name)
        utils.run_coroutine_threadsafe(self, self.AD.events.cancel_event_callback(self.name, handle))

    def info_listen_event(self, handle):
        self.logger.debug("Calling info_listen_event for %s", self.name)
        return utils.run_coroutine_threadsafe(self, self.AD.events.info_event_callback(self.name, handle))

    def fire_event(self, event, **kwargs):
        namespace = self._get_namespace(**kwargs)
        if "namespace" in kwargs:
            del kwargs["namespace"]
        utils.run_coroutine_threadsafe(self, self.AD.events.fire_event(namespace, event, **kwargs))

    #
    # Time
    #

    def parse_utc_string(self, s):
        # Split a UTC timestamp string on non-digits, rebuild a datetime and
        # shift it by the local offset.
        return datetime.datetime(*map(
            int, re.split('[^\d]', s)[:-1]
        )).timestamp() + self.get_tz_offset() * 60

    @staticmethod
    def get_tz_offset():
        """Return the local UTC offset in minutes."""
        utc_offset_min = int(round(
            (datetime.datetime.now()
             - datetime.datetime.utcnow()).total_seconds())
        ) / 60  # round for taking time twice
        utc_offset_h = utc_offset_min / 60

        # we do not handle 1/2 h timezone offsets
        assert utc_offset_min == utc_offset_h * 60
        return utc_offset_min

    @staticmethod
    def convert_utc(utc):
        return iso8601.parse_date(utc)

    def sun_up(self):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.sun_up())

    def sun_down(self):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.sun_down())

    def parse_time(self, time_str, name=None, aware=False):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.parse_time(time_str, name, aware))

    def parse_datetime(self, time_str, name=None, aware=False):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.parse_datetime(time_str, name, aware))

    def get_now(self):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now())

    def get_now_ts(self):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now_ts())

    def now_is_between(self, start_time_str, end_time_str, name=None):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.now_is_between(start_time_str, end_time_str, name))

    def sunrise(self, aware=False):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.sunrise(aware))

    def sunset(self, aware=False):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.sunset(aware))

    def time(self):
        # Scheduler "now" converted to the configured timezone, time part only
        return (utils.run_coroutine_threadsafe(self, self.AD.sched.get_now()).astimezone(self.AD.tz).time())

    def datetime(self, aware=False):
        if aware is True:
            return (utils.run_coroutine_threadsafe(self, self.AD.sched.get_now()).astimezone(self.AD.tz))
        else:
            return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now_naive())

    def date(self):
        return (utils.run_coroutine_threadsafe(self, self.AD.sched.get_now()).astimezone(self.AD.tz).date())

    def get_timezone(self):
        return self.AD.time_zone

    #
    # Scheduler
    #

    def cancel_timer(self, handle):
        name = self.name
        utils.run_coroutine_threadsafe(self, self.AD.sched.cancel_timer(name, handle))

    def info_timer(self, handle):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.info_timer(handle, self.name))

    def run_in(self, callback, seconds, **kwargs):
        """Schedule ``callback`` to run once, ``seconds`` from now."""
        name = self.name
        self.logger.debug("Registering run_in in %s seconds for %s", seconds, name)
        # convert seconds to an int if possible since a common pattern is to
        # pass this through from the config file which is a string
        exec_time = self.get_now() + timedelta(seconds=int(seconds))
        handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
            name, exec_time, callback, False, None, **kwargs
        ))
        return handle

    def run_once(self, callback, start, **kwargs):
        """Schedule ``callback`` once at the next occurrence of time
        ``start`` (a datetime.time or parseable string)."""
        if type(start) == datetime.time:
            when = start
        elif type(start) == str:
            when = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name, True))["datetime"].time()
        else:
            raise ValueError("Invalid type for start")
        name = self.name
        now = self.get_now()
        today = now.date()
        event = datetime.datetime.combine(today, when)
        # NOTE(review): ``event`` is naive while ``now`` comes from the
        # scheduler - confirm both sides of this comparison share awareness.
        if event < now:
            # Time already passed today, schedule for tomorrow
            one_day = datetime.timedelta(days=1)
            event = event + one_day
        # NOTE(review): passes an epoch timestamp while run_in/run_at pass
        # datetimes - confirm insert_schedule accepts both forms.
        exec_time = event.timestamp()
        handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
            name, exec_time, callback, False, None, **kwargs
        ))
        return handle

    def run_at(self, callback, start, **kwargs):
        """Schedule ``callback`` once at an absolute datetime (or parseable
        datetime string); raises ValueError for times in the past."""
        if type(start) == datetime.datetime:
            when = start
        elif type(start) == str:
            when = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name))["datetime"]
        else:
            raise ValueError("Invalid type for start")
        aware_when = self.AD.sched.convert_naive(when)
        name = self.name
        now = self.get_now()
        if aware_when < now:
            raise ValueError(
                "{}: run_at() Start time must be "
                "in the future".format(self.name)
            )
        handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
            name, aware_when, callback, False, None, **kwargs
        ))
        return handle

    def run_daily(self, callback, start, **kwargs):
        """Run ``callback`` every day at ``start``; "sunrise"/"sunset"
        strings delegate to the sun-based schedulers."""
        info = None
        when = None
        if type(start) == datetime.time:
            when = start
        elif type(start) == str:
            info = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name))
        else:
            raise ValueError("Invalid type for start")

        if info is None or info["sun"] is None:
            # Plain time of day: first occurrence, then a 24h repeat
            if when is None:
                when = info["datetime"].time()
            aware_now = self.get_now()
            now = self.AD.sched.make_naive(aware_now)
            today = now.date()
            event = datetime.datetime.combine(today, when)
            if event < now:
                event = event + datetime.timedelta(days=1)
            handle = self.run_every(callback, event, 24 * 60 * 60, **kwargs)
        elif info["sun"] == "sunrise":
            kwargs["offset"] = info["offset"]
            handle = self.run_at_sunrise(callback, **kwargs)
        else:
            kwargs["offset"] = info["offset"]
            handle = self.run_at_sunset(callback, **kwargs)
        return handle

    def run_hourly(self, callback, start, **kwargs):
        # start=None means "one hour from now"; otherwise align to the given
        # minute/second within the current hour.
        now = self.get_now()
        if start is None:
            event = now + datetime.timedelta(hours=1)
        else:
            event = now
            event = event.replace(minute=start.minute, second=start.second)
            if event < now:
                event = event + datetime.timedelta(hours=1)
        handle = self.run_every(callback, event, 60 * 60, **kwargs)
        return handle

    def run_minutely(self, callback, start, **kwargs):
        # start=None means "one minute from now"; otherwise align to the
        # given second within the current minute.
        now = self.get_now()
        if start is None:
            event = now + datetime.timedelta(minutes=1)
        else:
            event = now
            event = event.replace(second=start.second)
            if event < now:
                event = event + datetime.timedelta(minutes=1)
        handle = self.run_every(callback, event, 60, **kwargs)
        return handle

    def run_every(self, callback, start, interval, **kwargs):
        """Run ``callback`` every ``interval`` seconds beginning at
        ``start`` (must not be in the past)."""
        name = self.name
        now = self.get_now()
        aware_start = self.AD.sched.convert_naive(start)
        if aware_start < now:
            raise ValueError("start cannot be in the past")
        self.logger.debug("Registering run_every starting %s in %ss intervals for %s", aware_start, interval, name)
        handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(name, aware_start, callback, True, None,
                                                                                    interval=interval, **kwargs))
        return handle

    def _schedule_sun(self, name, type_, callback, **kwargs):
        # Repeating schedule anchored to the next sunrise/sunset
        if type_ == "next_rising":
            event = self.AD.sched.next_sunrise()
        else:
            event = self.AD.sched.next_sunset()
        handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
            name, event, callback, True, type_, **kwargs
        ))
        return handle

    def run_at_sunset(self, callback, **kwargs):
        name = self.name
        self.logger.debug("Registering run_at_sunset with kwargs = %s for %s", kwargs, name)
        handle = self._schedule_sun(name, "next_setting", callback, **kwargs)
        return handle

    def run_at_sunrise(self, callback, **kwargs):
        name = self.name
        self.logger.debug("Registering run_at_sunrise with kwargs = %s for %s", kwargs, name)
        handle = self._schedule_sun(name, "next_rising", callback, **kwargs)
        return handle

    #
    # Dashboard
    #

    def dash_navigate(self, target, timeout=-1, ret=None, sticky=0):
        # Ask all connected HADashboard instances to navigate to ``target``
        kwargs = {"command": "navigate", "target": target, "sticky": sticky}

        if timeout != -1:
            kwargs["timeout"] = timeout
        if ret is not None:
            kwargs["return"] = ret
        self.fire_event("__HADASHBOARD_EVENT", **kwargs)

    #
    # Other
    #

    def run_in_thread(self, callback, thread):
        # Immediately run ``callback`` pinned to a specific worker thread
        self.run_in(callback, 0, pin=False, pin_thread=thread)

    def get_thread_info(self):
        return utils.run_coroutine_threadsafe(self, self.AD.threading.get_thread_info())

    def get_scheduler_entries(self):
        return utils.run_coroutine_threadsafe(self, self.AD.sched.get_scheduler_entries())

    def get_callback_entries(self):
        return utils.run_coroutine_threadsafe(self, self.AD.callbacks.get_callback_entries())

    @staticmethod
    def get_alexa_slot_value(data, slot=None):
        """Return one Alexa slot value, or the whole slots dict when ``slot``
        is None; None when absent."""
        if "request" in data and \
                "intent" in data["request"] and \
                "slots" in data["request"]["intent"]:
            if slot is None:
                return data["request"]["intent"]["slots"]
            else:
                if slot in data["request"]["intent"]["slots"] and \
                        "value" in data["request"]["intent"]["slots"][slot]:
                    return data["request"]["intent"]["slots"][slot]["value"]
                else:
                    return None
        else:
            return None

    @staticmethod
    def get_alexa_error(data):
        if "request" in data and "err" in data["request"] and "message" in data["request"]["err"]:
            return data["request"]["err"]["message"]
        else:
            return None

    @staticmethod
    def get_alexa_intent(data):
        if "request" in data and "intent" in data["request"] and "name" in data["request"]["intent"]:
            return data["request"]["intent"]["name"]
        else:
            return None
<|code_end|>
appdaemon/state.py
<|code_start|>import uuid
import traceback
import os
from copy import copy, deepcopy
import datetime
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class State:
def __init__(self, ad: AppDaemon):
self.AD = ad
self.state = {}
self.state["default"] = {}
self.state["admin"] = {}
self.logger = ad.logging.get_child("_state")
# Initialize User Defined Namespaces
nspath = os.path.join(self.AD.config_dir, "namespaces")
try:
if not os.path.isdir(nspath):
os.makedirs(nspath)
for ns in self.AD.namespaces:
self.logger.info("User Defined Namespace '%s' initialized", ns)
writeback = "safe"
if "writeback" in self.AD.namespaces[ns]:
writeback = self.AD.namespaces[ns]["writeback"]
safe = False
if writeback == "safe":
safe = True
self.state[ns] = utils.PersistentDict(os.path.join(nspath, ns), safe)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in namespace setup")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
async def list_namespaces(self):
ns = []
for namespace in self.state:
ns.append(namespace)
return ns
def list_namespace_entities(self, namespace):
et = []
if namespace in self.state:
for entity in self.state[namespace]:
et.append(entity)
return et
else:
return None
    def terminate(self):
        # Shutdown hook: flush every user-defined namespace to disk.
        self.logger.debug("terminate() called for state")
        self.logger.info("Saving all namespaces")
        self.save_all_namespaces()
    async def add_state_callback(self, name, namespace, entity, cb, kwargs):
        """Register a state callback for app ``name`` and mirror it as an
        admin entity; returns the callback handle, or None when thread-pin
        validation fails."""
        if self.AD.threading.validate_pin(name, kwargs) is True:
            if "pin" in kwargs:
                pin_app = kwargs["pin"]
            else:
                pin_app = self.AD.app_management.objects[name]["pin_app"]

            if "pin_thread" in kwargs:
                pin_thread = kwargs["pin_thread"]
                # An explicit thread pin implies the app is pinned
                pin_app = True
            else:
                pin_thread = self.AD.app_management.objects[name]["pin_thread"]

            if name not in self.AD.callbacks.callbacks:
                self.AD.callbacks.callbacks[name] = {}

            handle = uuid.uuid4().hex
            self.AD.callbacks.callbacks[name][handle] = {
                "name": name,
                "id": self.AD.app_management.objects[name]["id"],
                "type": "state",
                "function": cb,
                "entity": entity,
                "namespace": namespace,
                "pin_app": pin_app,
                "pin_thread": pin_thread,
                "kwargs": kwargs
            }

            #
            # In the case of a quick_start parameter,
            # start the clock immediately if the device is already in the new state
            #
            if "immediate" in kwargs and kwargs["immediate"] is True:
                if entity is not None and "new" in kwargs and "duration" in kwargs:
                    if self.state[namespace][entity]["state"] == kwargs["new"]:
                        exec_time = await self.AD.sched.get_now_ts() + int(kwargs["duration"])
                        # Stash the timer handle so the duration timer can be
                        # cancelled later
                        kwargs["__duration"] = await self.AD.sched.insert_schedule(
                            name, exec_time, cb, False, None,
                            __entity=entity,
                            __attribute=None,
                            __old_state=None,
                            __new_state=kwargs["new"], **kwargs
                        )

            # Mirror the callback as an admin entity for introspection
            await self.AD.state.add_entity("admin", "state_callback.{}".format(handle), "active",
                                           {"app": name, "listened_entity": entity, "function": cb.__name__,
                                            "pinned": pin_app, "pinned_thread": pin_thread, "fired": 0, "executed": 0, "kwargs": kwargs})
            return handle
        else:
            return None
    async def cancel_state_callback(self, handle, name):
        """Remove a state callback (idempotent: unknown handles only log a
        warning) and delete its mirrored admin entity."""
        if name not in self.AD.callbacks.callbacks or handle not in self.AD.callbacks.callbacks[name]:
            self.logger.warning("Invalid callback in cancel_state_callback() from app {}".format(name))

        if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
            del self.AD.callbacks.callbacks[name][handle]
            await self.AD.state.remove_entity("admin",
                                              "state_callback.{}".format(handle))
        # Drop the app's (now empty) callback dict entirely
        if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
            del self.AD.callbacks.callbacks[name]
    async def info_state_callback(self, handle, name):
        """Return (namespace, entity, attribute, sanitized kwargs) for a
        registered callback handle; raises ValueError for unknown handles."""
        if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
            callback = self.AD.callbacks.callbacks[name][handle]
            return (
                callback["namespace"],
                callback["entity"],
                callback["kwargs"].get("attribute", None),
                # Strip internal bookkeeping keys before exposing kwargs
                self.sanitize_state_kwargs(self.AD.app_management.objects[name]["object"],
                                           callback["kwargs"])
            )
        else:
            raise ValueError("Invalid handle: {}".format(handle))
    async def process_state_callbacks(self, namespace, state):
        """Dispatch every matching registered state callback for one
        state_changed event, then cancel any "oneshot" callbacks that fired.
        """
        data = state["data"]
        entity_id = data['entity_id']
        self.logger.debug(data)
        device, entity = entity_id.split(".")

        # Process state callbacks
        removes = []
        for name in self.AD.callbacks.callbacks.keys():
            for uuid_ in self.AD.callbacks.callbacks[name]:
                callback = self.AD.callbacks.callbacks[name][uuid_]
                # A callback is eligible when it is a state callback and its
                # namespace matches, is "global", or the event is "global".
                if callback["type"] == "state" and (callback["namespace"] == namespace or callback[
                        "namespace"] == "global" or namespace == "global"):
                    cdevice = None
                    centity = None
                    if callback["entity"] is not None:
                        if "." not in callback["entity"]:
                            # Bare device name: listen to the whole device
                            cdevice = callback["entity"]
                            centity = None
                        else:
                            cdevice, centity = callback["entity"].split(".")
                    # Default to watching the state value itself
                    if callback["kwargs"].get("attribute") is None:
                        cattribute = "state"
                    else:
                        cattribute = callback["kwargs"].get("attribute")

                    cold = callback["kwargs"].get("old")
                    cnew = callback["kwargs"].get("new")

                    executed = False
                    if cdevice is None:
                        # Callback listens to all entities
                        executed = await self.AD.threading.check_and_dispatch_state(
                            name, callback["function"], entity_id,
                            cattribute,
                            data['new_state'],
                            data['old_state'],
                            cold, cnew,
                            callback["kwargs"],
                            uuid_,
                            callback["pin_app"],
                            callback["pin_thread"]
                        )
                    elif centity is None:
                        # Callback listens to a whole device
                        if device == cdevice:
                            executed = await self.AD.threading.check_and_dispatch_state(
                                name, callback["function"], entity_id,
                                cattribute,
                                data['new_state'],
                                data['old_state'],
                                cold, cnew,
                                callback["kwargs"],
                                uuid_,
                                callback["pin_app"],
                                callback["pin_thread"]
                            )
                    elif device == cdevice and entity == centity:
                        # Exact entity match
                        executed = await self.AD.threading.check_and_dispatch_state(
                            name, callback["function"], entity_id,
                            cattribute,
                            data['new_state'],
                            data['old_state'], cold,
                            cnew,
                            callback["kwargs"],
                            uuid_,
                            callback["pin_app"],
                            callback["pin_thread"]
                        )

                    # Remove the callback if appropriate
                    if executed is True:
                        remove = callback["kwargs"].get("oneshot", False)
                        if remove is True:
                            removes.append({"name": callback["name"], "uuid": uuid_})

        # Cancel outside the loops so the callback dicts are not mutated
        # while they are being iterated.
        for remove in removes:
            await self.cancel_state_callback(remove["uuid"], remove["name"])
async def entity_exists(self, namespace, entity):
if namespace in self.state and entity in self.state[namespace]:
return True
else:
return False
def get_entity(self, namespace = None, entity_id = None):
if namespace is None:
return self.state
elif entity_id is None:
if namespace in self.state:
return self.state[namespace]
else:
self.logger.warning("Unknown namespace: %s", namespace)
elif namespace in self.state:
if entity_id in self.state[namespace]:
return self.state[namespace][entity_id]
else:
self.logger.warning("Unknown namespace: %s", namespace)
return None
async def remove_entity(self, namespace, entity):
if entity in self.state[namespace]:
self.state[namespace].pop(entity)
data = \
{
"event_type": "__AD_ENTITY_REMOVED",
"data":
{
"entity_id": entity,
}
}
await self.AD.events.process_event(namespace, data)
async def add_entity(self, namespace, entity, state, attributes = None):
if attributes is None:
attrs = {}
else:
attrs = attributes
state = {"state": state, "last_changed": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)), "attributes": attrs}
self.state[namespace][entity] = state
data = \
{
"event_type": "__AD_ENTITY_ADDED",
"data":
{
"entity_id": entity,
"state": state,
}
}
await self.AD.events.process_event(namespace, data)
async def get_state(
    self, name, namespace, entity_id=None, attribute=None,
    default=None, copy=True
):
    """Read state for one entity, one attribute, a whole domain, or a namespace.

    Args:
        name: Caller's name (used in error messages).
        namespace: Namespace to read from.
        entity_id: Fully qualified entity, a bare domain, or None for all.
        attribute: Attribute name, "all" for the full record, or None for state.
        default: Value returned when the entity/attribute is not found.
        copy: When True (default) a deepcopy is returned so callers cannot
            mutate the stored state.
    """
    self.logger.debug("get_state: %s.%s %s %s",
                      entity_id, attribute, default, copy)

    def maybe_copy(value):
        return deepcopy(value) if copy else value

    if entity_id is not None and "." in entity_id:
        # Fully qualified entity lookup
        if not await self.entity_exists(namespace, entity_id):
            return default
        record = self.state[namespace][entity_id]
        if attribute is None:
            return maybe_copy(record["state"])
        if attribute == "all":
            return maybe_copy(record)
        if attribute in record["attributes"]:
            return maybe_copy(record["attributes"][attribute])
        if attribute in record:
            return maybe_copy(record[attribute])
        return default

    if attribute is not None:
        raise ValueError(
            "{}: Querying a specific attribute is only possible for a single entity"
            .format(name)
        )
    if entity_id is None:
        # Whole namespace
        return maybe_copy(self.state[namespace])

    # Bare domain: return every entity whose domain matches
    wanted = entity_id.split(".", 1)[0]
    return {
        eid: maybe_copy(rec)
        for eid, rec in self.state[namespace].items()
        if eid.split(".", 1)[0] == wanted
    }
def parse_state(self, entity_id, namespace, **kwargs):
    """Merge **kwargs into the stored record for *entity_id* and return it.

    When the entity already exists, the stored dict is mutated in place and
    returned; otherwise a fresh record is built. ``state`` updates the state
    value, ``attributes`` updates (or with ``replace=True`` overwrites) the
    attribute dict, and any other keys become attributes directly.
    """
    self.logger.debug("parse_state: %s, %s", entity_id, kwargs)

    if entity_id in self.state[namespace]:
        record = self.state[namespace][entity_id]
    else:
        # Brand-new entity entry
        record = {"attributes": {}}

    if "state" in kwargs:
        record["state"] = kwargs.pop("state")

    if "attributes" in kwargs:
        if kwargs.get("replace", False):
            record["attributes"] = kwargs["attributes"]
        else:
            record["attributes"].update(kwargs["attributes"])
    else:
        # Remaining kwargs (minus the control key) become attributes
        kwargs.pop("replace", None)
        record["attributes"].update(kwargs)

    return record
async def add_to_state(self, name, namespace, entity_id, i):
    """Increment the entity's state value by *i*; no-op when the state is None."""
    current = await self.get_state(name, namespace, entity_id)
    if current is None:
        return
    await self.set_state(name, namespace, entity_id, state=current + i)
async def add_to_attr(self, name, namespace, entity_id, attr, i):
    """Increment attribute *attr* of the entity by *i*; no-op for unknown entities."""
    record = await self.get_state(name, namespace, entity_id, attribute="all")
    if record is None:
        return
    # copy() so the stored value is not aliased before set_state re-applies it
    record["attributes"][attr] = copy(record["attributes"][attr]) + i
    await self.set_state(name, namespace, entity_id, attributes=record["attributes"])
def set_state_simple(self, namespace, entity_id, state):
    # Overwrite the stored record directly, bypassing parsing, plugin
    # dispatch, and state_changed event generation (see set_state()).
    self.state[namespace][entity_id] = state
async def set_state(self, name, namespace, entity_id, **kwargs):
    """Set an entity's state, routing through the namespace's plugin when present.

    Args:
        name: Caller's name (used for logging only).
        namespace: Namespace the entity lives in.
        entity_id: Fully qualified entity id.
        **kwargs: ``state``, ``attributes``, ``replace`` and extra attribute
            key/values, as understood by parse_state().

    Returns:
        The merged new-state record (with a refreshed ``last_changed``).
    """
    self.logger.debug("set_state(): %s, %s", entity_id, kwargs)
    # Snapshot the prior record so the state_changed event can carry it
    if entity_id in self.state[namespace]:
        old_state = deepcopy(self.state[namespace][entity_id])
    else:
        old_state = {"state": None, "attributes": {}}
    new_state = self.parse_state(entity_id, namespace, **kwargs)
    # Timestamp truncated to whole seconds, in the scheduler's notion of "now"
    new_state["last_changed"] = utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz)
    self.logger.debug("Old state: %s", old_state)
    self.logger.debug("New state: %s", new_state)
    if not await self.AD.state.entity_exists(namespace, entity_id):
        self.logger.info("%s: Entity %s created in namespace: %s", name, entity_id, namespace)
    # Fire the plugin's state update if it has one
    plugin = await self.AD.plugins.get_plugin_object(namespace)
    if hasattr(plugin, "set_plugin_state"):
        # We assume that the state change will come back to us via the plugin
        self.logger.debug("sending event to plugin")
        result = await plugin.set_plugin_state(namespace, entity_id, **kwargs)
        if result is not None:
            if "entity_id" in result:
                # entity_id is positional below; don't let it collide in kwargs
                result.pop("entity_id")
            self.state[namespace][entity_id] = self.parse_state(entity_id, namespace, **result)
    else:
        # Set the state locally
        self.state[namespace][entity_id] = new_state
        # Fire the event locally
        self.logger.debug("sending event locally")
        data = {
            "event_type": "state_changed",
            "data": {
                "entity_id": entity_id,
                "new_state": new_state,
                "old_state": old_state
            }
        }
        await self.AD.events.process_event(namespace, data)
    return new_state
def set_namespace_state(self, namespace, state):
    # Replace the namespace's entire entity map with *state*.
    self.state[namespace] = state
def update_namespace_state(self, namespace, state):
    # Merge *state* into the namespace's entity map, overwriting duplicate keys.
    self.state[namespace].update(state)
async def save_namespace(self, namespace):
    # Persist the namespace's backing store.
    # NOTE(review): declared async but save() is invoked synchronously -
    # presumably the store's save() does not block; confirm.
    self.state[namespace].save()
def save_all_namespaces(self):
    # Persist every configured namespace's backing store.
    for ns in self.AD.namespaces:
        self.state[ns].save()
def save_hybrid_namespaces(self):
    # Persist only the namespaces configured with writeback == "hybrid".
    for ns in self.AD.namespaces:
        if self.AD.namespaces[ns]["writeback"] == "hybrid":
            self.state[ns].save()
#
# Utilities
#
def sanitize_state_kwargs(self, app, kwargs):
    """Return a copy of *kwargs* stripped of AppDaemon-internal keys and the
    app's registered constraint keys."""
    internal_keys = [
        "old", "new", "__attribute", "duration", "state",
        "__entity", "__duration", "__old_state", "__new_state",
        "oneshot", "pin_app", "pin_thread", "__delay",
    ]
    return utils._sanitize_kwargs(dict(kwargs), internal_keys + app.list_constraints())
<|code_end|>
|
add oneshot to listen_event
I noticed that `listen_state` has `oneshot` which seems very useful. I think it makes sense to implement the same in `listen_event`. What do you guys think?
I might give this a shot if I have time soon.
| appdaemon/adapi.py
<|code_start|>import asyncio
import datetime
import inspect
import iso8601
import re
from datetime import timedelta
from copy import deepcopy
# needed for fake coro cb that looks like scheduler
import uuid
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class ADAPI:
"""AppDaemon API class.
This class includes all native API calls to AppDaemon
"""
#
# Internal parameters
#
def __init__(self, ad: AppDaemon, name, logging_obj, args, config, app_config, global_vars):
    """Bind this API object to the AppDaemon core and set up per-app logging.

    Args:
        ad: The AppDaemon core object all calls delegate to.
        name: The app's name (header key from the app config file).
        logging_obj: AppDaemon's logging subsystem.
        args: This app's argument dict from its config entry.
        config: Global AppDaemon configuration.
        app_config: The full app configuration.
        global_vars: Object shared between all apps.
    """
    # Store args
    self.AD = ad
    self.name = name
    self._logging = logging_obj
    self.config = config
    self.app_config = app_config
    # deepcopy so the app can mutate its args without affecting stored config
    self.args = deepcopy(args)
    self.app_dir = self.AD.app_dir
    self.config_dir = self.AD.config_dir
    self.dashboard_dir = self.AD.http.dashboard_dir
    self.global_vars = global_vars
    self._namespace = "default"
    # Per-app children of the main and error loggers
    self.logger = self._logging.get_child(name)
    self.err = self._logging.get_error().getChild(name)
    # Cache of user-defined log loggers, filled lazily by get_user_log()
    self.user_logs = {}
    if "log_level" in args:
        self.logger.setLevel(args["log_level"])
        self.err.setLevel(args["log_level"])
    if "log" in args:
        # App asked for a specific user log as its main logger
        userlog = self.get_user_log(args["log"])
        if userlog is not None:
            self.logger = userlog
    # Dialogflow API version; get_dialogflow_intent() may switch this to 1
    self.dialogflow_v = 2
@staticmethod
def _sub_stack(msg):
# If msg is a data structure of some type, don't sub
if type(msg) is str:
stack = inspect.stack()
if msg.find("__module__") != -1:
msg = msg.replace("__module__", stack[2][1])
if msg.find("__line__") != -1:
msg = msg.replace("__line__", str(stack[2][2]))
if msg.find("__function__") != -1:
msg = msg.replace("__function__", stack[2][3])
return msg
def _get_namespace(self, **kwargs):
if "namespace" in kwargs:
namespace = kwargs["namespace"]
del kwargs["namespace"]
else:
namespace = self._namespace
return namespace
#
# Logging
#
def _log(self, logger, msg, *args, **kwargs):
#
# Internal
#
if "level" in kwargs:
level = kwargs.pop("level", "INFO")
else:
level = "INFO"
ascii_encode = kwargs.pop("ascii_encode", True)
if ascii_encode is True:
safe_enc = lambda s: str(s).encode("utf-8", "replace").decode("ascii", "replace")
msg = safe_enc(msg)
logger.log(self._logging.log_levels[level], msg, *args, **kwargs)
def log(self, msg, *args, **kwargs):
"""Logs a message to AppDaemon's main logfile.
Args:
msg (str): The message to log.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
level (str, optional): The log level of the message - takes a string representing the
standard logger levels (Default: ``"WARNING"``).
ascii_encode (bool, optional): Switch to disable the encoding of all log messages to
ascii. Set this to true if you want to log UTF-8 characters (Default: ``True``).
log (str, optional): Send the message to a specific log, either system or user_defined.
System logs are ``main_log``, ``error_log``, ``diag_log`` or ``access_log``.
Any other value in use here must have a corresponding user-defined entity in
the ``logs`` section of appdaemon.yaml.
stack_info (bool, optional): If ``True`` the stack info will included.
Returns:
None.
Examples:
Log a message to the main logfile of the system.
>>> self.log("Log Test: Parameter is %s", some_variable)
Log a message to the specified logfile.
>>> self.log("Log Test: Parameter is %s", some_variable, log="test_log")
Log a message with error-level to the main logfile of the system.
>>> self.log("Log Test: Parameter is %s", some_variable, level = "ERROR")
Log a message using `placeholders` to the main logfile of the system.
>>> self.log("Line: __line__, module: __module__, function: __function__, Msg: Something bad happened")
Log a WARNING message (including the stack info) to the main logfile of the system.
>>> self.log("Stack is", some_value, level="WARNING", stack_info=True)
"""
if "log" in kwargs:
# Its a user defined log
logger = self.get_user_log(kwargs["log"])
kwargs.pop("log")
else:
logger = self.logger
msg = self._sub_stack(msg)
self._log(logger, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""Logs a message to AppDaemon's error logfile.
Args:
msg (str): The message to log.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
level (str, optional): The log level of the message - takes a string representing the
standard logger levels.
ascii_encode (bool, optional): Switch to disable the encoding of all log messages to
ascii. Set this to true if you want to log UTF-8 characters (Default: ``True``).
log (str, optional): Send the message to a specific log, either system or user_defined.
System logs are ``main_log``, ``error_log``, ``diag_log`` or ``access_log``.
Any other value in use here must have a corresponding user-defined entity in
the ``logs`` section of appdaemon.yaml.
Returns:
None.
Examples:
Log an error message to the error logfile of the system.
>>> self.error("Some Warning string")
Log an error message with critical-level to the error logfile of the system.
>>> self.error("Some Critical string", level = "CRITICAL")
"""
self._log(self.err, msg, *args, **kwargs)
@utils.sync_wrapper
async def listen_log(self, callback, level="INFO", **kwargs):
    """Register *callback* to receive every message an app logs.

    Args:
        callback: Function invoked for each matching log message.
        level (str): Minimum level forwarded to the app (default "INFO").
        **kwargs: ``log`` (restrict to one system or user log),
            ``pin``/``pin_thread`` (thread pinning), ``namespace``
            (defaults to "admin").

    Returns:
        A handle usable with cancel_listen_log(); store it on the app
        object (e.g. self.handle) for later cancellation.
    """
    namespace = kwargs.pop("namespace", "admin")
    return await self.AD.logging.add_log_callback(namespace, self.name, callback, level, **kwargs)
@utils.sync_wrapper
async def cancel_listen_log(self, handle):
    """Cancel a log callback previously registered with listen_log().

    Args:
        handle: Handle returned by the original listen_log() call.
    """
    self.logger.debug("Canceling listen_log for %s", self.name)
    await self.AD.logging.cancel_log_callback(self.name, handle)
def get_main_log(self):
"""Returns the underlying logger object used for the main log.
Examples:
Log a critical message to the `main` logfile of the system.
>>> log = self.get_main_log()
>>> log.critical("Log a critical error")
"""
return self.logger
def get_error_log(self):
"""Returns the underlying logger object used for the error log.
Examples:
Log an error message to the `error` logfile of the system.
>>> error_log = self.get_error_log()
>>> error_log.error("Log an error", stack_info=True, exc_info=True)
"""
return self.err
def get_user_log(self, log):
"""Gets the specified-user logger of the App.
Args:
log (str): The name of the log you want to get the underlying logger object from,
as described in the ``logs`` section of ``appdaemon.yaml``.
Returns:
The underlying logger object used for the error log.
Examples:
Log an error message to a user-defined logfile.
>>> log = self.get_user_log("test_log")
>>> log.error("Log an error", stack_info=True, exc_info=True)
"""
logger = None
if log in self.user_logs:
# Did we use it already?
logger = self.user_logs[log]
else:
# Build it on the fly
parent = self.AD.logging.get_user_log(self, log)
if parent is not None:
logger = parent.getChild(self.name)
self.user_logs[log] = logger
if "log_level" in self.args:
logger.setLevel(self.args["log_level"])
return logger
def set_log_level(self, level):
"""Sets a specific log level for the App.
Args:
level (str): Log level.
Returns:
None.
Notes:
Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
``DEBUG``, ``NOTSET``.
Examples:
>>> self.set_log_level("DEBUG")
"""
self.logger.setLevel(self._logging.log_levels[level])
self.err.setLevel(self._logging.log_levels[level])
for log in self.user_logs:
self.user_logs[log].setLevel(self._logging.log_levels[level])
def set_error_level(self, level):
"""Sets the log level to send to the `error` logfile of the system.
Args:
level (str): Error level.
Returns:
None.
Notes:
Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
``DEBUG``, ``NOTSET``.
"""
self.err.setLevel(self._logging.log_levels[level])
#
# Threading
#
@utils.sync_wrapper
async def set_app_pin(self, pin):
    """Pin (or unpin) this app to a worker thread.

    Args:
        pin (bool): True to pin the app, False to unpin.
    """
    threading = self.AD.threading
    await threading.set_app_pin(self.name, pin)
@utils.sync_wrapper
async def get_app_pin(self):
    """Return True when this app is currently pinned to a thread."""
    return await self.AD.threading.get_app_pin(self.name)
@utils.sync_wrapper
async def set_pin_thread(self, thread):
    """Pin this app to worker thread *thread* (0 .. configured threads - 1)."""
    return await self.AD.threading.set_pin_thread(self.name, thread)
@utils.sync_wrapper
async def get_pin_thread(self):
    """Return the thread number this app is pinned to, or -1 when unpinned."""
    return await self.AD.threading.get_pin_thread(self.name)
#
# Namespace
#
def set_namespace(self, namespace):
"""Sets a new namespace for the App to use from that point forward.
Args:
namespace (str): Name of the new namespace
Returns:
None.
Examples:
>>> self.set_namespace("hass1")
"""
self._namespace = namespace
def get_namespace(self):
"""Returns the App's namespace."""
return self._namespace
@utils.sync_wrapper
async def list_namespaces(self):
    """Return a list of the available namespaces."""
    return await self.AD.state.list_namespaces()
@utils.sync_wrapper
async def save_namespace(self, **kwargs):
    """Persist a user-defined namespace's entities to its backing file.

    Entities saved this way are reloaded with their previous states when
    AppDaemon restarts, giving a basic form of non-volatile storage.

    Args:
        **kwargs: ``namespace`` (optional) - defaults to the app's namespace.
    """
    await self.AD.state.save_namespace(self._get_namespace(**kwargs))
#
# Utility
#
@utils.sync_wrapper
async def get_app(self, name):
    """Return the instantiated object of another running app.

    Args:
        name (str): App name as given in the config file header section.

    Returns:
        The app object, allowing direct method calls across apps.
    """
    return await self.AD.app_management.get_app(name)
@utils.sync_wrapper
async def _check_entity(self, namespace, entity):
    # Internal: reject malformed entity ids and warn about unknown entities.
    if "." not in entity:
        raise ValueError(
            "{}: Invalid entity ID: {}".format(self.name, entity))
    if not await self.AD.state.entity_exists(namespace, entity):
        # Unknown entity is only a warning - it may be created later
        self.logger.warning("%s: Entity %s not found in namespace %s", self.name, entity, namespace)
@staticmethod
def get_ad_version():
    """Return the current AppDaemon version string.

    Examples:
        >>> version = self.get_ad_version()
    """
    return utils.__version__
@utils.sync_wrapper
async def entity_exists(self, entity_id, **kwargs):
    """Return True when *entity_id* exists in the selected namespace.

    Args:
        entity_id (str): Fully qualified entity id (including device type).
        **kwargs: ``namespace`` (optional) - defaults to the app's namespace,
            allowing checks against other plugin instances or AD entities.

    Examples:
        >>> if self.entity_exists("light.living_room"):
        >>> #do something
        >>> if self.entity_exists("mqtt.security_settings", namespace="mqtt"):
        >>> #do something
    """
    return await self.AD.state.entity_exists(self._get_namespace(**kwargs), entity_id)
@utils.sync_wrapper
async def split_entity(self, entity_id, **kwargs):
    """Split a fully qualified entity id into [device, entity].

    Args:
        entity_id (str): e.g. ``light.hall_light`` -> ["light", "hall_light"].
        **kwargs: ``namespace`` (optional).

    Examples:
        >>> device, entity = self.split_entity(entity_id)
    """
    namespace = self._get_namespace(**kwargs)
    await self._check_entity(namespace, entity_id)
    return entity_id.split(".")
@utils.sync_wrapper
async def remove_entity(self, entity_id, **kwargs):
    """Permanently delete an entity that was created within a namespace.

    Args:
        entity_id (str): Fully qualified entity id.
        **kwargs: ``namespace`` (optional).

    Examples:
        >>> self.remove_entity('sensor.living_room')
        >>> self.remove_entity('mqtt.living_room_temperature', namespace='mqtt')
    """
    await self.AD.state.remove_entity(self._get_namespace(**kwargs), entity_id)
    return None
@staticmethod
def split_device_list(devices):
"""Converts a comma-separated list of device types to an iterable list.
This is intended to assist in use cases where the App takes a list of
entities from an argument, e.g., a list of sensors to monitor. If only
one entry is provided, an iterable list will still be returned to avoid
the need for special processing.
Args:
devices (str): A comma-separated list of devices to be split (without spaces).
Returns:
A list of split devices with 1 or more entries.
Examples:
>>> for sensor in self.split_device_list(self.args["sensors"]):
>>> #do something for each sensor, e.g., make a state subscription
"""
return devices.split(",")
@utils.sync_wrapper
async def get_plugin_config(self, **kwargs):
    """Return the plugin's metadata for the selected namespace (for the HASS
    plugin this is the ``/api/config`` payload, e.g. latitude/longitude).

    Args:
        **kwargs: ``namespace`` (optional).
    """
    return await self.AD.plugins.get_plugin_meta(self._get_namespace(**kwargs))
@utils.sync_wrapper
async def friendly_name(self, entity_id, **kwargs):
    """Return an entity's friendly name, the entity id when it has none, or
    None when the entity is not in the namespace's state.

    Args:
        entity_id (str): Fully qualified entity id.
        **kwargs: ``namespace`` (optional).
    """
    await self._check_entity(self._get_namespace(**kwargs), entity_id)
    state = await self.get_state(**kwargs)
    if entity_id not in state:
        return None
    attributes = state[entity_id]["attributes"]
    return attributes.get("friendly_name", entity_id)
@utils.sync_wrapper
async def set_production_mode(self, mode=True):
    """Activate (True, the default) or deactivate production mode.

    Returns:
        The mode that was set, or None when *mode* is not a bool.
    """
    if isinstance(mode, bool):
        await self.AD.utility.set_production_mode(mode)
        return mode
    self.logger.warning("%s not a valid parameter for Production Mode", mode)
    return None
#
# Internal Helper functions
#
def start_app(self, app, **kwargs):
"""Starts an App which can either be running or not.
This Api call cannot start an app which has already been disabled in the App Config.
It essentially only runs the initialize() function in the app, and changes to attributes
like class name or app config is not taken into account.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.start_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "appdaemon"
self.call_service("app/start", **kwargs)
return None
def stop_app(self, app, **kwargs):
"""Stops an App which is running.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.stop_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "appdaemon"
self.call_service("app/stop", **kwargs)
return None
def restart_app(self, app, **kwargs):
"""Restarts an App which can either be running or not.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.restart_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "appdaemon"
self.call_service("app/restart", **kwargs)
return None
def reload_apps(self, **kwargs):
"""Reloads the apps, and loads up those that have changes made to their .yaml or .py files.
This utility function can be used if AppDaemon is running in production mode, and it is
needed to reload apps that changes have been made to.
Args:
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.reload_apps()
"""
kwargs["namespace"] = "appdaemon"
self.call_service("app/reload", **kwargs)
return None
#
# Dialogflow
#
def get_dialogflow_intent(self, data):
"""Gets the intent's action from the Google Home response.
Args:
data: Response received from Google Home.
Returns:
A string representing the Intent from the interaction model that was requested,
or ``None``, if no action was received.
Examples:
>>> intent = ADAPI.get_dialogflow_intent(data)
"""
if "result" in data and "action" in data["result"]:
self.dialogflow_v = 1
return data["result"]["action"]
elif "queryResult" in data and "action" in data["queryResult"]:
self.dialogflow_v = 2
return data["queryResult"]["action"]
else:
return None
@staticmethod
def get_dialogflow_slot_value(data, slot=None):
"""Gets slots' values from the interaction model.
Args:
data: Response received from Google Home.
slot (str): Name of the slot. If a name is not specified, all slots will be returned
as a dictionary. If a name is specified but is not found, ``None`` will be returned.
Returns:
A string representing the value of the slot from the interaction model, or a hash of slots.
Examples:
>>> beer_type = ADAPI.get_dialogflow_intent(data, "beer_type")
>>> all_slots = ADAPI.get_dialogflow_intent(data)
"""
if "result" in data:
# using V1 API
contexts = data["result"]["contexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["result"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
elif "queryResult" in data:
# using V2 API
contexts = data["queryResult"]["outputContexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["queryResult"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
else:
return None
def format_dialogflow_response(self, speech=None):
"""Formats a response to be returned to Google Home, including speech.
Args:
speech (str): The text for Google Home to say.
Returns:
None.
Examples:
>>> ADAPI.format_dialogflow_response(speech = "Hello World")
"""
if self.dialogflow_v == 1:
speech = \
{
"speech": speech,
"source": "Appdaemon",
"displayText": speech
}
elif self.dialogflow_v == 2:
speech = \
{
"fulfillmentText": speech,
"source": "Appdaemon"
}
else:
speech = None
return speech
#
# Alexa
#
@staticmethod
def format_alexa_response(speech=None, card=None, title=None):
"""Formats a response to be returned to Alex including speech and a card.
Args:
speech (str): The text for Alexa to say.
card (str): Text for the card.
title (str): Title for the card.
Returns:
None.
Examples:
>>> ADAPI.format_alexa_response(speech = "Hello World", card = "Greetings to the world", title = "Hello")
"""
response = \
{
"shouldEndSession": True
}
if speech is not None:
response["outputSpeech"] = \
{
"type": "PlainText",
"text": speech
}
if card is not None:
response["card"] = \
{
"type": "Simple",
"title": title,
"content": card
}
speech = \
{
"version": "1.0",
"response": response,
"sessionAttributes": {}
}
return speech
@staticmethod
def get_alexa_error(data):
"""Gets the error message from the Alexa API response.
Args:
data: Response received from the Alexa API .
Returns:
A string representing the value of message, or ``None`` if no error message was received.
"""
if "request" in data and "err" in data["request"] and "message" in data["request"]["err"]:
return data["request"]["err"]["message"]
else:
return None
@staticmethod
def get_alexa_intent(data):
"""Gets the Intent's name from the Alexa response.
Args:
data: Response received from Alexa.
Returns:
A string representing the Intent's name from the interaction model that was requested,
or ``None``, if no Intent was received.
Examples:
>>> intent = ADAPI.get_alexa_intent(data)
"""
if "request" in data and "intent" in data["request"] and "name" in data["request"]["intent"]:
return data["request"]["intent"]["name"]
else:
return None
@staticmethod
def get_alexa_slot_value(data, slot=None):
"""Gets values for slots from the interaction model.
Args:
data: The request data received from Alexa.
slot: Name of the slot. If a name is not specified, all slots will be returned as
a dictionary. If a name is specified but is not found, None will be returned.
Returns:
A ``string`` representing the value of the slot from the interaction model, or a ``hash`` of slots.
Examples:
>>> beer_type = ADAPI.get_alexa_intent(data, "beer_type")
>>> all_slots = ADAPI.get_alexa_intent(data)
"""
if "request" in data and \
"intent" in data["request"] and \
"slots" in data["request"]["intent"]:
if slot is None:
return data["request"]["intent"]["slots"]
else:
if slot in data["request"]["intent"]["slots"] and \
"value" in data["request"]["intent"]["slots"][slot]:
return data["request"]["intent"]["slots"][slot]["value"]
else:
return None
else:
return None
#
# API
#
@utils.sync_wrapper
async def register_endpoint(self, callback, name=None):
    """Register an API endpoint that routes HTTP requests to this app.

    Args:
        callback: Function called for each request; it must return a string
            (may be empty) and an HTTP status code (e.g. 200).
        name (str, optional): Endpoint name; defaults to the app's name.

    Returns:
        A handle usable with unregister_endpoint(), or None when the HTTP
        component is not configured.

    Examples:
        >>> self.register_endpoint(my_callback)
        >>> self.register_endpoint(alexa_cb, "alexa")
    """
    ep = self.name if name is None else name
    if self.AD.http is not None:
        return await self.AD.http.register_endpoint(callback, ep)
    # Bug fix: message previously read "filed" instead of "failed"
    self.logger.warning("register_endpoint for %s failed - HTTP component is not configured", name)
    return None
@utils.sync_wrapper
async def unregister_endpoint(self, handle):
    """Remove an endpoint previously created with register_endpoint().

    Args:
        handle: Handle returned by the original register_endpoint() call.
    """
    await self.AD.http.unregister_endpoint(handle, self.name)
#
# State
#
@utils.sync_wrapper
async def listen_state(self, callback, entity=None, **kwargs):
    """Registers a callback that reacts to state changes.

    Args:
        callback: Function invoked when the requested state change occurs.
            It must conform to the standard State Callback format
            documented `here <APPGUIDE.html#state-callbacks>`__.
        entity (str, optional): A device type (e.g., ``light`` or
            ``binary_sensor``) to subscribe to state changes of every
            device of that type, or a fully qualified entity_id to listen
            to a single entity.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        attribute (str, optional): Attribute within the entity state
            object to watch. The special value ``all`` supplies the
            callback with the entire state dictionary instead of a single
            attribute value.
        new (optional): Only fire when the new value of the watched
            attribute matches this value.
        old (optional): Only fire when the old value of the watched
            attribute matches this value.
        duration (int, optional): Seconds the listened-for state must be
            maintained before the callback fires; best combined with
            ``old``/``new`` on a specific entity.
        timeout (int, optional): Seconds after which the callback is
            automatically removed; an already-started duration timer will
            still fire.
        immediate (bool, optional): Evaluate the conditions immediately at
            registration time rather than waiting for a state change; in
            that case ``old`` is ignored and reported as ``None``.
        oneshot (bool, optional): If ``True``, the callback is cancelled
            after its first firing.
        namespace (str, optional): Namespace to use for the call; the
            special value ``global`` listens to state updates from any
            plugin. See `namespaces <APPGUIDE.html#namespaces>`__.
        pin (bool, optional): If ``True``, pin the callback to a
            particular thread.
        pin_thread (int, optional): Worker-pool thread to run the callback
            on (0 - number of threads -1).
        *kwargs (optional): Additional keyword values supplied to the
            callback when it is called.

    Notes:
        ``old`` and ``new`` can be used singly or together.

    Returns:
        A unique identifier usable with ``cancel_listen_state()``. Store
        it on the object (e.g., ``self.handle``) since cancellation will
        likely happen in a different function.

    Examples:
        >>> self.handle = self.listen_state(self.my_callback)
        >>> self.handle = self.listen_state(self.my_callback, "light")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "all")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "brightness", old = "100", new = "200")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on", duration = 60, immediate = True)
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    # Only fully qualified entity ids (containing a ".") are validated.
    if entity is not None and "." in entity:
        await self._check_entity(namespace, entity)

    self.logger.debug("Calling listen_state for %s", self.name)
    return await self.AD.state.add_state_callback(self.name, namespace, entity, callback, kwargs)
@utils.sync_wrapper
async def cancel_listen_state(self, handle):
    """Cancels a callback created with ``listen_state()``.

    The App will no longer be notified for the cancelled state change;
    other registered state changes continue to be monitored.

    Args:
        handle: The handle returned when the ``listen_state()`` call was made.

    Returns:
        None.

    Examples:
        >>> self.cancel_listen_state(self.office_light_handle)
    """
    self.logger.debug("Canceling listen_state for %s", self.name)
    await self.AD.state.cancel_state_callback(handle, self.name)
@utils.sync_wrapper
async def info_listen_state(self, handle):
    """Looks up a state callback's registration details by its handle.

    Args:
        handle: The handle returned when the ``listen_state()`` call was made.

    Returns:
        The ``entity``, ``attribute``, and ``kwargs`` values supplied when
        the callback was originally created.

    Examples:
        >>> entity, attribute, kwargs = self.info_listen_state(self.handle)
    """
    self.logger.debug("Calling info_listen_state for %s", self.name)
    return await self.AD.state.info_state_callback(handle, self.name)
@utils.sync_wrapper
async def get_state(self, entity_id=None, attribute=None, default=None, copy=True, **kwargs):
    """Gets the state of any component within Home Assistant.

    States are tracked continuously (push-based), so this call runs
    locally and does not require a round trip to Home Assistant.

    Args:
        entity_id (str, optional): A device type (e.g., ``light``) to get
            a dictionary of all matching entities keyed by entity_id, or a
            fully qualified entity_id to get that entity's state
            attribute (e.g., ``on``/``off``).
        attribute (str, optional): Attribute within the entity state
            object to return. The special value ``all`` returns the
            entire state dictionary for the entity.
        default (any, optional): Value returned when the requested
            attribute or entity does not exist (Default: ``None``).
        copy (bool, optional): Return a copy of the stored state object
            (default). Passing ``False`` returns AppDaemon's internal
            object — slightly faster, but mutating it is dangerous; only
            do so for read-only access.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): Namespace to use for the call. See
            `namespaces <APPGUIDE.html#namespaces>`__; usually safe to
            ignore.

    Returns:
        The requested state. With no parameters, the entire state of Home
        Assistant as a dictionary keyed by entity.

    Examples:
        >>> state = self.get_state()
        >>> state = self.get_state("switch")
        >>> state = self.get_state("light.office_1")
        >>> state = self.get_state("light.office_1", attribute="brightness")
        >>> state = self.get_state("light.office_1", attribute="all")
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    return await self.AD.state.get_state(self.name, namespace, entity_id, attribute, default, copy, **kwargs)
@utils.sync_wrapper
async def set_state(self, entity_id, **kwargs):
    """Updates the state of the specified entity.

    Args:
        entity_id (str): The fully qualified entity id (including the
            device type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        state: New state value to be set.
        attributes (optional): Entity attributes to be updated.
        namespace(str, optional): Namespace in which to change the entity
            state; defaults to the last specified or the default
            namespace. See `namespaces <APPGUIDE.html#namespaces>`__.
        replace(bool, optional): When ``True`` and ``attributes`` is
            provided, replace the internal attribute register entirely
            instead of merging. Only sensible for internal entity state;
            plugins will usually overwrite it on their next update.

    Returns:
        A dictionary representing the new state of the updated entity.

    Examples:
        >>> self.set_state("light.office_1", state="off")
        >>> self.set_state("light.office_1", state = "on", attributes = {"color_name": "red"})
        >>> self.set_state("light.office_1", state="off", namespace ="hass")
    """
    self.logger.debug("set state: %s, %s", entity_id, kwargs)
    namespace = self._get_namespace(**kwargs)
    await self._check_entity(namespace, entity_id)
    kwargs.pop("namespace", None)

    return await self.AD.state.set_state(self.name, namespace, entity_id, **kwargs)
#
# Service
#
@staticmethod
def _check_service(service):
if service.find("/") == -1:
raise ValueError("Invalid Service Name: {}".format(service))
def register_service(self, service, cb, **kwargs):
    """Registers a service callable from other apps, the REST API and the Event Stream.

    The registered function becomes available to other apps via
    ``call_service()``, is published as a service in the REST API, and is
    reachable through the event stream's `call_service` command.

    Args:
        service: Service name in ``domain/service`` format. A new domain
            is created if it does not yet exist.
        cb: Function invoked when the service is requested. May be a
            regular function or async; note that an async function runs
            on AppDaemon's main loop, so problems in it can delay
            AppDaemon's core functions.

    Returns:
        None

    Examples:
        >>> self.register_service("myservices/service1", mycallback)
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("register_service: %s/%s, %s", domain, service_name, kwargs)

    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    self.AD.services.register_service(namespace, domain, service_name, cb, __async="auto", **kwargs)
@utils.sync_wrapper
async def call_service(self, service, **kwargs):
    """Calls a HASS service within AppDaemon.

    Any service can be called with its required parameters; available
    services can be discovered with the developer tools in the UI. For
    listed services the part before the first period is the ``domain``
    and the part after is the service name, e.g. `light/turn_on` has
    domain `light` and service name `turn_on`.

    Args:
        service (str): The service name.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        **kwargs: Service-specific parameters as keyword value pairs,
            e.g. `entity_id = light.office_1`; discoverable via the
            developer tools. Most service calls require an ``entity_id``.
        namespace(str, optional): Namespace in which to make the call;
            defaults to the last specified or the default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__.

    Returns:
        None.

    Examples:
        HASS
        >>> self.call_service("light/turn_on", entity_id = "light.office_lamp", color_name = "red")
        >>> self.call_service("notify/notify", title = "Hello", message = "Hello World")
        MQTT
        >>> call_service("mqtt/subscribe", topic="homeassistant/living_room/light", qos=2)
        >>> call_service("mqtt/publish", topic="homeassistant/living_room/light", payload="on")
        Utility
        >>> call_service("app/restart", app="notify_app", namespace="appdaemon")
        >>> call_service("app/stop", app="lights_app", namespace="appdaemon")
        >>> call_service("app/reload", namespace="appdaemon")

        For Utility calls the `namespace` arg must be ``appdaemon``, as no
        app can work within that namespace; omitting it raises an error.
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("call_service: %s/%s, %s", domain, service_name, kwargs)

    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    # Tag the request with the calling app's name for the service layer.
    kwargs["__name"] = self.name
    return await self.AD.services.call_service(namespace, domain, service_name, kwargs)
@utils.sync_wrapper
async def run_sequence(self, sequence, **kwargs):
    """Runs an AppDaemon Sequence.

    Sequences are ordered lists of service calls, defined in a valid
    apps.yaml file or supplied inline.

    Args:
        sequence: The sequence name referring to the correct entry in
            apps.yaml, or a dict containing the actual commands to run.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): Namespace in which to run the sequence;
            defaults to the last specified or the default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__.

    Returns:
        A handle that can be used with `cancel_sequence()` to terminate
        the script.

    Examples:
        Run a yaml-defined sequence called "sequence.front_room_scene".

        >>> handle = self.run_sequence("sequence.front_room_scene")

        Run an inline sequence.

        >>> handle = self.run_sequence([{"light.turn_on": {"entity_id": "light.office_1"}}, {"sleep": 5}, {"light.turn_off":
        {"entity_id": "light.office_1"}}])
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    self.logger.debug("Calling run_sequence() for %s", self.name)
    return await self.AD.sequences.run_sequence(self.name, namespace, sequence, **kwargs)
@utils.sync_wrapper
async def cancel_sequence(self, handle):
    """Cancels an AppDaemon Sequence.

    Args:
        handle: The handle returned by the `run_sequence()` call.

    Returns:
        None.

    Examples:
        >>> self.cancel_sequence(handle)
    """
    # Fixed: the debug message (and docstring example) previously said
    # "run_sequence()", which was misleading when tracing cancellations.
    self.logger.debug("Calling cancel_sequence() for %s", self.name)
    await self.AD.sequences.cancel_sequence(self.name, handle)
#
# Events
#
@utils.sync_wrapper
async def listen_event(self, callback, event=None, **kwargs):
    """Registers a callback for a specific event, or any event.

    Args:
        callback: Function invoked when the event fires. It must conform
            to the standard State Callback format documented
            `here <APPGUIDE.html#state-callbacks>`__.
        event (optional): Name of the event to subscribe to — either a
            standard Home Assistant event such as `service_registered` or
            an arbitrary custom event such as `"MODE_CHANGE"`. When
            omitted, all events are subscribed to.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): Namespace to use for the call; the
            special value ``global`` listens to state updates from any
            plugin. See `namespaces <APPGUIDE.html#namespaces>`__.
        pin (bool, optional): If ``True``, pin the callback to a
            particular thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads -1).
        timeout (int, optional): Seconds after which the callback is
            automatically removed.
        **kwargs (optional): App-specific keyword value pairs supplied to
            the callback. When a keyword matches a field in the event
            data it acts as a filter: the callback only fires if the
            values match; keywords matching nothing in the event data are
            ignored. Inspect Home Assistant's logfiles when the event
            fires to discover filterable fields.

    Returns:
        A handle that can be used to cancel the callback.

    Examples:
        Listen all `"MODE_CHANGE"` events.

        >>> self.listen_event(self.mode_event, "MODE_CHANGE")

        Listen for a `minimote` event activating scene 3.

        >>> self.listen_event(self.generic_event, "zwave.scene_activated", scene_id = 3)

        Listen for a `minimote` event activating scene 3 from a specific `minimote`.

        >>> self.listen_event(self.generic_event, "zwave.scene_activated", entity_id = "minimote_31", scene_id = 3)
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    self.logger.debug("Calling listen_event for %s", self.name)
    return await self.AD.events.add_event_callback(self.name, namespace, callback, event, **kwargs)
@utils.sync_wrapper
async def cancel_listen_event(self, handle):
    """Cancels a callback registered with ``listen_event()``.

    Args:
        handle: A handle returned from a previous call to ``listen_event()``.

    Returns:
        None.

    Examples:
        >>> self.cancel_listen_event(handle)
    """
    self.logger.debug("Canceling listen_event for %s", self.name)
    await self.AD.events.cancel_event_callback(self.name, handle)
@utils.sync_wrapper
async def info_listen_event(self, handle):
    """Looks up an event callback's registration details by its handle.

    Args:
        handle: The handle returned when the ``listen_event()`` call was made.

    Returns:
        The values (event, kwargs) supplied when the callback was
        initially created.

    Examples:
        >>> event, kwargs = self.info_listen_event(handle)
    """
    self.logger.debug("Calling info_listen_event for %s", self.name)
    return await self.AD.events.info_event_callback(self.name, handle)
@utils.sync_wrapper
async def fire_event(self, event, **kwargs):
    """Fires an event on the AppDaemon bus, for apps and plugins.

    Args:
        event: Name of the event — either a standard Home Assistant event
            such as `service_registered` or an arbitrary custom event
            such as "MODE_CHANGE".
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): Namespace to use for the call. See
            `namespaces <APPGUIDE.html#namespaces>`__; usually safe to
            ignore.
        **kwargs (optional): Keyword arguments supplied as part of the
            event payload.

    Returns:
        None.

    Examples:
        >>> self.fire_event("MY_CUSTOM_EVENT", jam="true")
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    await self.AD.events.fire_event(namespace, event, **kwargs)
#
# Time
#
def parse_utc_string(self, utc_string):
    """Converts a UTC date/time string to a local epoch timestamp.

    Args:
        utc_string (str): A string that contains a date and time to convert.

    Returns:
        A timestamp (float) equivalent to the date and time contained in
        `utc_string`, shifted by the local timezone offset.
    """
    # Split on every non-digit run; the final fragment is discarded
    # before the pieces are fed to the datetime constructor.
    fields = [int(part) for part in re.split(r'[^\d]', utc_string)[:-1]]
    return datetime.datetime(*fields).timestamp() + self.get_tz_offset() * 60
@staticmethod
def get_tz_offset():
"""Returns the timezone difference between UTC and Local Time."""
utc_offset_min = int(round(
(datetime.datetime.now()
- datetime.datetime.utcnow()).total_seconds())
) / 60 # round for taking time twice
utc_offset_h = utc_offset_min / 60
# we do not handle 1/2 h timezone offsets
assert utc_offset_min == utc_offset_h * 60
return utc_offset_min
@staticmethod
def convert_utc(utc):
    """Gets a localised `datetime` object for the specified UTC string.

    Home Assistant provides timestamps in UTC, encoded as `ISO 8601`
    combined date and time strings. This function accepts one of these
    strings and converts it to a localised Python `datetime` object.

    Args:
        utc: An `ISO 8601` encoded date and time string in the following
            format: `2016-07-13T14:24:02.040658-04:00`

    Returns:
        A localised Python `datetime` object representing the timestamp.
    """
    parsed = iso8601.parse_date(utc)
    return parsed
@utils.sync_wrapper
async def sun_up(self):
    """Determines if the sun is currently up.

    Returns:
        bool: ``True`` if the sun is up, ``False`` otherwise.

    Examples:
        >>> if self.sun_up():
        >>>    #do something
    """
    return await self.AD.sched.sun_up()
@utils.sync_wrapper
async def sun_down(self):
    """Determines if the sun is currently down.

    Returns:
        bool: ``True`` if the sun is down, ``False`` otherwise.

    Examples:
        >>> if self.sun_down():
        >>>    #do something
    """
    return await self.AD.sched.sun_down()
@utils.sync_wrapper
async def parse_time(self, time_str, name=None, aware=False):
    """Creates a `time` object from its string representation.

    Accepts a plain time, or a sunrise/sunset offset, and converts it to
    a ``datetime.time`` object.

    Args:
        time_str (str): A string in one of the following formats:

            a. ``HH:MM:SS`` - the time in Hours Minutes and Seconds, 24 hour format.
            b. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise or sunset
               with an optional positive or negative offset in Hours Minutes and seconds.
        name (str, optional): Name of the calling app or module, used
            only for logging purposes.
        aware (bool, optional): If ``True`` the created time object will
            be timezone aware.

    Returns:
        A `time` object representing the time given in `time_str`.

    Examples:
        >>> self.parse_time("17:30:00")
        17:30:00
        >>> time = self.parse_time("sunrise")
        04:33:17
        >>> time = self.parse_time("sunset + 00:30:00")
        19:18:48
        >>> time = self.parse_time("sunrise + 01:00:00")
        05:33:17
    """
    return await self.AD.sched.parse_time(time_str, name, aware)
@utils.sync_wrapper
async def parse_datetime(self, time_str, name=None, aware=False):
    """Creates a `datetime` object from its string representation.

    Accepts a date and time, a bare time, or a sunrise/sunset offset, and
    converts it to a `datetime` object.

    Args:
        time_str (str): A string in one of the following formats:

            a. ``YY-MM-DD-HH:MM:SS`` - the date and time in Year, Month, Day, Hours,
               Minutes, and Seconds, 24 hour format.
            b. ``HH:MM:SS`` - the time in Hours Minutes and Seconds, 24 hour format;
               the resulting datetime gets today's date.
            c. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise or sunset
               with an optional positive or negative offset in Hours Minutes and seconds.
        name (str, optional): Name of the calling app or module, used
            only for logging purposes.
        aware (bool, optional): If ``True`` the created datetime object
            will be timezone aware.

    Returns:
        A `datetime` object representing the time and date given in `time_str`.

    Examples:
        >>> self.parse_datetime("2018-08-09 17:30:00")
        2018-08-09 17:30:00
        >>> self.parse_datetime("17:30:00")
        2019-08-15 17:30:00
        >>> self.parse_datetime("sunrise")
        2019-08-16 05:33:17
        >>> self.parse_datetime("sunset + 00:30:00")
        2019-08-16 19:18:48
        >>> self.parse_datetime("sunrise + 01:00:00")
        2019-08-16 06:33:17
    """
    return await self.AD.sched.parse_datetime(time_str, name, aware)
@utils.sync_wrapper
async def get_now(self):
    """Returns the current Local Date and Time.

    Examples:
        >>> self.get_now()
        2019-08-16 21:17:41.098813+00:00
    """
    return await self.AD.sched.get_now()
@utils.sync_wrapper
async def get_now_ts(self):
    """Returns the current Local Timestamp.

    Examples:
        >>> self.get_now_ts()
        1565990318.728324
    """
    return await self.AD.sched.get_now_ts()
@utils.sync_wrapper
async def now_is_between(self, start_time, end_time, name=None):
    """Determines if the current `time` is within the specified start and end times.

    Takes two string representations of a ``time`` (or ``sunrise`` /
    ``sunset`` offsets) and returns ``true`` if the current time lies
    between them; transitions across midnight are handled correctly.

    Args:
        start_time (str): A string representation of the start time.
        end_time (str): A string representation of the end time.
        name (str, optional): Name of the calling app or module, used
            only for logging purposes.

    Returns:
        bool: ``True`` if the current time is within the specified start
        and end times, ``False`` otherwise.

    Notes:
        ``start_time`` and ``end_time`` should follow one of these formats:

            a. ``HH:MM:SS`` - the time in Hours Minutes and Seconds, 24 hour format.
            b. ``sunrise|sunset [+|- HH:MM:SS]``- time of the next sunrise or sunset
               with an optional positive or negative offset in Hours Minutes,
               and Seconds.

    Examples:
        >>> if self.now_is_between("17:30:00", "08:00:00"):
        >>>     #do something
        >>> if self.now_is_between("sunset - 00:45:00", "sunrise + 00:45:00"):
        >>>     #do something
    """
    return await self.AD.sched.now_is_between(start_time, end_time, name)
@utils.sync_wrapper
async def sunrise(self, aware=False):
    """Returns a `datetime` object for the next sunrise.

    Args:
        aware (bool, optional): Specifies if the created datetime object
            will be timezone `aware` or `not`.

    Examples:
        >>> self.sunrise()
        2019-08-16 05:33:17
    """
    return await self.AD.sched.sunrise(aware)
@utils.sync_wrapper
async def sunset(self, aware=False):
    """Returns a `datetime` object for the next sunset.

    Args:
        aware (bool, optional): Specifies if the created datetime object
            will be timezone `aware` or `not`.

    Examples:
        >>> self.sunset()
        2019-08-16 19:48:48
    """
    return await self.AD.sched.sunset(aware)
@utils.sync_wrapper
async def time(self):
    """Returns a localised `time` object representing the current Local Time.

    Prefer this over the standard Python ways to discover the current
    time, especially when using the "Time Travel" feature for testing.

    Examples:
        >>> self.time()
        20:15:31.295751
    """
    current = await self.AD.sched.get_now()
    return current.astimezone(self.AD.tz).time()
@utils.sync_wrapper
async def datetime(self, aware=False):
    """Returns a `datetime` object representing the current Local Date and Time.

    Prefer this over the standard Python ways to discover the current
    datetime, especially when using the "Time Travel" feature for testing.

    Args:
        aware (bool, optional): Specifies if the created datetime object
            will be timezone `aware` or `not`.

    Examples:
        >>> self.datetime()
        2019-08-15 20:15:55.549379
    """
    # Identity check deliberately kept: only the exact value True selects
    # the aware branch, matching the original behavior for truthy values.
    if aware is not True:
        return await self.AD.sched.get_now_naive()
    current = await self.AD.sched.get_now()
    return current.astimezone(self.AD.tz)
@utils.sync_wrapper
async def date(self):
    """Returns a localised `date` object representing the current Local Date.

    Prefer this over the standard Python ways to discover the current
    date, especially when using the "Time Travel" feature for testing.

    Examples:
        >>> self.date()
        2019-08-15
    """
    current = await self.AD.sched.get_now()
    return current.astimezone(self.AD.tz).date()
def get_timezone(self):
    """Returns the current time zone.

    Returns:
        The configured time zone as stored on the AppDaemon core
        (presumably a tz name or tzinfo object -- confirm against config).
    """
    return self.AD.time_zone
#
# Scheduler
#
@utils.sync_wrapper
async def cancel_timer(self, handle):
    """Cancel a previously created timer.

    Args:
        handle: Handle returned by the call that created the timer.

    Returns:
        None.

    Examples:
        >>> self.cancel_timer(handle)
    """
    self.logger.debug("Canceling timer with handle %s for %s", handle, self.name)
    await self.AD.sched.cancel_timer(self.name, handle)
@utils.sync_wrapper
async def info_timer(self, handle):
    """Look up details of a scheduled timer from its handle.

    Args:
        handle: Handle returned when the timer was created.

    Returns:
        A ``(time, interval, kwargs)`` tuple -- the next fire time, the
        repeat interval (``0`` for one-shot timers) and the kwargs given
        at creation -- or ``None`` when the handle is invalid or the
        timer no longer exists.

    Examples:
        >>> time, interval, kwargs = self.info_timer(handle)
    """
    return await self.AD.sched.info_timer(handle, self.name)
@utils.sync_wrapper
async def run_in(self, callback, delay, **kwargs):
    """Run *callback* after *delay* seconds.

    Use this instead of ``time.sleep()`` to delay work, for instance a
    60 second delay before a light is turned off after a motion trigger.

    Args:
        callback: Function to be invoked when the delay expires. It must
            conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        delay (int|float|str): Seconds before the callback is invoked.
            Strings (a common artefact of config files) and fractional
            seconds are both accepted.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).
        **kwargs: Arbitrary keyword parameters to be provided to the
            callback function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than
        ``random_end``; either may be negative (offset before an event)
        or positive (offset after it).

    Examples:
        Run the specified callback after 10 seconds.

        >>> self.handle = self.run_in(self.run_in_c, 10)

        Run the specified callback after 5 seconds with a keyword arg.

        >>> self.handle = self.run_in(self.run_in_c, 5, title = "run_in5")
    """
    name = self.name
    self.logger.debug("Registering run_in in %s seconds for %s", delay, name)
    # Coerce to float so string values from config files still work and
    # sub-second delays are no longer silently truncated to 0 (the old
    # int() cast dropped the fractional part).
    exec_time = await self.get_now() + timedelta(seconds=float(delay))
    handle = await self.AD.sched.insert_schedule(
        name,
        exec_time,
        callback,
        False,
        None,
        **kwargs)
    return handle
@utils.sync_wrapper
async def run_once(self, callback, start, **kwargs):
    """Run *callback* once, at the specified time of day.

    Args:
        callback: Function to be invoked at the specified time of day.
            It must conform to the standard Scheduler Callback format
            documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object or a ``parse_time()`` formatted
            string specifying when the callback fires. A time already in
            the past fires the next day at that time instead.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).
        **kwargs: Arbitrary keyword parameters to be provided to the
            callback function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than
        ``random_end``; either may be negative or positive.

    Examples:
        Run at 4pm today, or 4pm tomorrow if it is already after 4pm.

        >>> runtime = datetime.time(16, 0, 0)
        >>> handle = self.run_once(self.run_once_c, runtime)

        Run today at 10:30 using the `parse_time()` function.

        >>> handle = self.run_once(self.run_once_c, "10:30:00")

        Run at sunset.

        >>> handle = self.run_once(self.run_once_c, "sunset")

        Run an hour after sunrise.

        >>> handle = self.run_once(self.run_once_c, "sunrise + 01:00:00")
    """
    # isinstance() instead of an exact type() comparison (PEP 8 / E721)
    # so subclasses of time/str are accepted as well.
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"].time()
    else:
        raise ValueError("Invalid type for start")
    name = self.name
    self.logger.debug("Registering run_once at %s for %s", when, name)
    now = await self.get_now()
    today = now.date()
    event = datetime.datetime.combine(today, when)
    aware_event = self.AD.sched.convert_naive(event)
    if aware_event < now:
        # Already passed today -- roll over to the same time tomorrow.
        one_day = datetime.timedelta(days=1)
        aware_event = aware_event + one_day
    handle = await self.AD.sched.insert_schedule(
        name, aware_event, callback, False, None, **kwargs)
    return handle
@utils.sync_wrapper
async def run_at(self, callback, start, **kwargs):
    """Run *callback* once, at the specified date and time.

    Args:
        callback: Function to be invoked at the specified time. It must
            conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``datetime`` object or a ``parse_time()``
            formatted string specifying when the callback fires.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).
        **kwargs: Arbitrary keyword parameters to be provided to the
            callback function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than
        ``random_end``; either may be negative or positive.
        ``run_at()`` raises an exception if the specified time is in the
        past.

    Examples:
        Run at 4pm today.

        >>> runtime = datetime.time(16, 0, 0)
        >>> today = datetime.date.today()
        >>> event = datetime.datetime.combine(today, runtime)
        >>> handle = self.run_at(self.run_at_c, event)

        Run today at 10:30 using the `parse_time()` function.

        >>> handle = self.run_at(self.run_at_c, "10:30:00")

        Run on a specific date and time.

        >>> handle = self.run_at(self.run_at_c, "2018-12-11 10:30:00")

        Run at the next sunset.

        >>> handle = self.run_at(self.run_at_c, "sunset")

        Run an hour after the next sunrise.

        >>> handle = self.run_at(self.run_at_c, "sunrise + 01:00:00")
    """
    # isinstance() instead of an exact type() comparison (PEP 8 / E721)
    # so datetime subclasses are accepted as well.
    if isinstance(start, datetime.datetime):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"]
    else:
        raise ValueError("Invalid type for start")
    aware_when = self.AD.sched.convert_naive(when)
    name = self.name
    self.logger.debug("Registering run_at at %s for %s", when, name)
    now = await self.get_now()
    if aware_when < now:
        raise ValueError(
            "{}: run_at() Start time must be "
            "in the future".format(self.name)
        )
    handle = await self.AD.sched.insert_schedule(
        name, aware_when, callback, False, None, **kwargs)
    return handle
@utils.sync_wrapper
async def run_daily(self, callback, start, **kwargs):
    """Run *callback* at the same time every day.

    Args:
        callback: Function to be invoked every day at the specified
            time. It must conform to the standard Scheduler Callback
            format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object or a ``parse_time()`` formatted
            string specifying when the callback fires. A time already in
            the past fires the next day at that time instead. Sunrise or
            sunset relative times are re-evaluated every day to track
            the actual sun event.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).
        **kwargs: Arbitrary keyword parameters to be provided to the
            callback function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than
        ``random_end``; either may be negative or positive.

    Examples:
        Run daily at 7pm.

        >>> runtime = datetime.time(19, 0, 0)
        >>> self.run_daily(self.run_daily_c, runtime)

        Run at 10:30 every day using the `parse_time()` function.

        >>> handle = self.run_daily(self.run_daily_c, "10:30:00")

        Run every day at sunrise.

        >>> handle = self.run_daily(self.run_daily_c, "sunrise")

        Run every day an hour after sunset.

        >>> handle = self.run_daily(self.run_daily_c, "sunset + 01:00:00")
    """
    info = None
    when = None
    # isinstance() instead of an exact type() comparison (PEP 8 / E721)
    # so subclasses of time/str are accepted as well.
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        info = await self.AD.sched._parse_time(start, self.name)
    else:
        raise ValueError("Invalid type for start")
    if info is None or info["sun"] is None:
        # Plain time of day: schedule a 24h repeating timer.
        if when is None:
            when = info["datetime"].time()
        aware_now = await self.get_now()
        now = self.AD.sched.make_naive(aware_now)
        today = now.date()
        event = datetime.datetime.combine(today, when)
        if event < now:
            event = event + datetime.timedelta(days=1)
        handle = await self.run_every(callback, event, 24 * 60 * 60, **kwargs)
    elif info["sun"] == "sunrise":
        # Sun-relative: delegate so the time tracks the daily sun event.
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunrise(callback, **kwargs)
    else:
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunset(callback, **kwargs)
    return handle
@utils.sync_wrapper
async def run_hourly(self, callback, start, **kwargs):
    """Run *callback* at the same minute/second of every hour.

    Args:
        callback: Function to be invoked every hour. It must conform to
            the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object whose minute and second are used
            (the hour component is ignored). If the resulting time this
            hour has passed, the first run is the next hour. ``None``
            starts the callback one hour from now.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).
        **kwargs: Arbitrary keyword parameters passed to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must be numerically lower than ``random_end``;
        either may be negative or positive.

    Examples:
        Run every hour, on the hour.

        >>> runtime = datetime.time(0, 0, 0)
        >>> self.run_hourly(self.run_hourly_c, runtime)
    """
    current = await self.get_now()
    if start is None:
        first_fire = current + datetime.timedelta(hours=1)
    else:
        first_fire = current.replace(minute=start.minute, second=start.second)
        if first_fire < current:
            # This hour's slot already passed -- start next hour.
            first_fire = first_fire + datetime.timedelta(hours=1)
    return await self.run_every(callback, first_fire, 60 * 60, **kwargs)
@utils.sync_wrapper
async def run_minutely(self, callback, start, **kwargs):
    """Run *callback* at the same second of every minute.

    Args:
        callback: Function to be invoked every minute. It must conform
            to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object whose second is used (hour and
            minute are ignored). If the resulting time this minute has
            passed, the first run is the next minute. ``None`` starts
            the callback one minute from now.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).
        **kwargs: Arbitrary keyword parameters passed to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must be numerically lower than ``random_end``;
        either may be negative or positive.

    Examples:
        Run every minute on the minute.

        >>> time = datetime.time(0, 0, 0)
        >>> self.run_minutely(self.run_minutely_c, time)
    """
    current = await self.get_now()
    if start is None:
        first_fire = current + datetime.timedelta(minutes=1)
    else:
        first_fire = current.replace(second=start.second)
        if first_fire < current:
            # This minute's slot already passed -- start next minute.
            first_fire = first_fire + datetime.timedelta(minutes=1)
    return await self.run_every(callback, first_fire, 60, **kwargs)
@utils.sync_wrapper
async def run_every(self, callback, start, interval, **kwargs):
    """Run *callback* repeatedly at a fixed interval, starting at *start*.

    Args:
        callback: Function to be invoked each interval. It must conform
            to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``datetime`` object for the initial callback;
            it must not be in the past.
        interval: Seconds between executions.
        **kwargs: Arbitrary keyword parameters passed to the callback.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If *start* is in the past.

    Notes:
        ``random_start`` must be numerically lower than ``random_end``;
        either may be negative or positive.

    Examples:
        Run every 17 minutes starting in 2 hours time.

        >>> self.run_every(self.run_every_c, time, 17 * 60)
    """
    first_fire = self.AD.sched.convert_naive(start)
    current = await self.get_now()
    if first_fire < current:
        raise ValueError("start cannot be in the past")
    self.logger.debug(
        "Registering run_every starting %s in %ss intervals for %s",
        first_fire, interval, self.name)
    return await self.AD.sched.insert_schedule(
        self.name, first_fire, callback, True,
        None, interval=interval, **kwargs)
@utils.sync_wrapper
async def _schedule_sun(self, name, type_, callback, **kwargs):
    # Internal helper: register a repeating schedule anchored to the next
    # sun event ("next_rising" or "next_setting").
    if type_ == "next_rising":
        anchor = self.AD.sched.next_sunrise()
    else:
        anchor = self.AD.sched.next_sunset()
    return await self.AD.sched.insert_schedule(
        name, anchor, callback, True, type_, **kwargs)
@utils.sync_wrapper
async def run_at_sunset(self, callback, **kwargs):
    """Run *callback* every day at (or around) sunset.

    Args:
        callback: Function to be invoked at or around sunset. It must
            conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        **kwargs: Arbitrary keyword parameters passed to the callback.

    Keyword Args:
        offset (int, optional): Seconds to delay the callback after the
            sun event; negative fires before it. Cannot be combined with
            ``random_start``/``random_end``.
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must be numerically lower than ``random_end``;
        either may be negative or positive.

    Examples:
        Example using timedelta.

        >>> self.run_at_sunset(self.sun, offset = datetime.timedelta(minutes = -45).total_seconds())

        Or you can just do the math yourself.

        >>> self.run_at_sunset(self.sun, offset = 30 * 60)

        Run at a random time +/- 60 minutes from sunset.

        >>> self.run_at_sunset(self.sun, random_start = -60*60, random_end = 60*60)

        Run at a random time between 30 and 60 minutes before sunset.

        >>> self.run_at_sunset(self.sun, random_start = -60*60, random_end = 30*60)
    """
    self.logger.debug("Registering run_at_sunset with kwargs = %s for %s", kwargs, self.name)
    return await self._schedule_sun(self.name, "next_setting", callback, **kwargs)
@utils.sync_wrapper
async def run_at_sunrise(self, callback, **kwargs):
    """Run *callback* every day at (or around) sunrise.

    Args:
        callback: Function to be invoked at or around sunrise. It must
            conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        **kwargs: Arbitrary keyword parameters passed to the callback.

    Keyword Args:
        offset (int, optional): Seconds to delay the callback after the
            sun event; negative fires before it. Cannot be combined with
            ``random_start``/``random_end``.
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, pin the callback to one thread.
        pin_thread (int, optional): Worker-pool thread to run the
            callback on (0 - number of threads - 1).

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must be numerically lower than ``random_end``;
        either may be negative or positive.

    Examples:
        Run 45 minutes before sunrise.

        >>> self.run_at_sunrise(self.sun, offset = datetime.timedelta(minutes = -45).total_seconds())

        Or you can just do the math yourself.

        >>> self.run_at_sunrise(self.sun, offset = 30 * 60)

        Run at a random time +/- 60 minutes from sunrise.

        >>> self.run_at_sunrise(self.sun, random_start = -60*60, random_end = 60*60)

        Run at a random time between 30 and 60 minutes before sunrise.

        >>> self.run_at_sunrise(self.sun, random_start = -60*60, random_end = 30*60)
    """
    self.logger.debug("Registering run_at_sunrise with kwargs = %s for %s", kwargs, self.name)
    return await self._schedule_sun(self.name, "next_rising", callback, **kwargs)
#
# Dashboard
#
def dash_navigate(self, target, timeout=-1, ret=None, sticky=0):
    """Force all connected Dashboards to navigate to a new URL.

    Args:
        target (str): Name of the Dashboard to navigate to
            (e.g. ``/SensorPanel``) -- not a URL.
        timeout (int): Seconds to stay on the new dashboard before
            returning to the original; omit for a permanent switch.
            A click or touch before the timeout cancels the return.
        ret (str): Dashboard to return to after the timeout elapses.
        sticky (int): With the default ``0``, clicking the new dashboard
            keeps it, otherwise the original returns after *timeout*.
            A non-zero value extends the stay by that many seconds per
            click, returning after that period of inactivity.

    Returns:
        None.

    Examples:
        Switch to AlarmStatus Panel then return after 10 seconds.

        >>> self.dash_navigate("/AlarmStatus", timeout=10)

        Switch to Locks Panel then return to Main panel after 10 seconds.

        >>> self.dash_navigate("/Locks", timeout=10, ret="/SensorPanel")
    """
    event_args = {"command": "navigate", "target": target, "sticky": sticky}
    if timeout != -1:
        event_args["timeout"] = timeout
    if ret is not None:
        event_args["return"] = ret
    self.fire_event("__HADASHBOARD_EVENT", **event_args)
#
# Async
#
async def run_in_executor(self, func, *args, **kwargs):
    """Run the blocking callable *func* in an executor and await its result."""
    return await utils.run_in_executor(self, func, *args, **kwargs)
@utils.sync_wrapper
async def create_task(self, coro, callback=None, **kwargs):
    """Schedules a Coroutine to be executed.

    Args:
        coro: The coroutine object (`not coroutine function`) to be executed.
        callback: The non-async callback to be executed when complete.
        **kwargs (optional): Any additional keyword arguments to send the callback.

    Returns:
        A Future, which can be cancelled by calling f.cancel().

    Examples:
        >>> f = self.create_task(asyncio.sleep(3), callback=self.coro_callback)
        >>>
        >>> def coro_callback(self, kwargs):
    """
    # get stuff we'll need to fake scheduler call
    # Build a synthetic scheduler entry so the completion callback can be
    # dispatched through AppDaemon's normal worker-thread machinery.
    sched_data = {
        "id": uuid.uuid4().hex,
        "name": self.name,
        "objectid": self.AD.app_management.objects[self.name]["id"],
        "type": "scheduler",
        "function": callback,
        "pin_app": await self.get_app_pin(),
        "pin_thread": await self.get_pin_thread(),
    }
    def callback_inner(f):
        # Runs when the future completes; hands the result to the user
        # callback via dispatch_worker.
        try:
            # TODO: use our own callback type instead of borrowing
            # from scheduler
            # NOTE(review): mutates the captured `kwargs` dict in place.
            kwargs["result"] = f.result()
            sched_data["kwargs"] = kwargs
            self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))
            # callback(f.result(), kwargs)
        except asyncio.CancelledError:
            # Cancelled futures complete silently -- no callback fired.
            pass
    f = asyncio.ensure_future(coro)
    if callback is not None:
        self.logger.debug("Adding add_done_callback for coro %s for %s", f, self.name)
        f.add_done_callback(callback_inner)
    # Track the future so AppDaemon can cancel it on app shutdown.
    self.AD.futures.add_future(self.name, f)
    return f
@staticmethod
async def sleep(delay, result=None):
    """Pause execution for a certain time span
    (not available in sync apps).

    Args:
        delay (int): Number of seconds to pause.
        result (optional): Result to return upon delay completion.

    Returns:
        Result or `None`.

    Raises:
        RuntimeError: If called outside a running event loop (i.e. from
            a sync app).

    Notes:
        This function is not available in sync apps.

    Examples:
        >>> async def myfunction(self):
        >>>     await self.sleep(5)
    """
    # get_running_loop() raises RuntimeError unless we are inside a
    # running event loop. The previous get_event_loop() probe could
    # create and return a loop on the main thread even in sync code,
    # incorrectly passing the check.
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        raise RuntimeError("The sleep method is for use in ASYNC methods only") from None
    return await asyncio.sleep(delay, result=result)
#
# Other
#
def run_in_thread(self, callback, thread, **kwargs):
    """Schedules a callback to be run in a different thread from the current one.

    Args:
        callback: Function to be run on the new thread.
        thread (int): Thread number (0 - number of threads).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        None.

    Examples:
        >>> self.run_in_thread(my_callback, 8)
    """
    # Zero-delay schedule pinned to the requested worker thread.
    self.run_in(callback, 0, pin=False, pin_thread=thread, **kwargs)
@utils.sync_wrapper
async def get_thread_info(self):
    """Gets information on AppDaemon worker threads.

    Returns:
        A dictionary containing all the information for AppDaemon worker threads.

    Examples:
        >>> thread_info = self.get_thread_info()
    """
    # Delegates to the threading subsystem.
    return await self.AD.threading.get_thread_info()
@utils.sync_wrapper
async def get_scheduler_entries(self):
    """Gets information on AppDaemon scheduler entries.

    Returns:
        A dictionary containing all the information for entries in the AppDaemon scheduler.

    Examples:
        >>> schedule = self.get_scheduler_entries()
    """
    # Delegates to the scheduler subsystem.
    return await self.AD.sched.get_scheduler_entries()
@utils.sync_wrapper
async def get_callback_entries(self):
    """Gets information on AppDaemon callback entries.

    Returns:
        A dictionary containing all the information for entries in the AppDaemon state,
        and event callback table.

    Examples:
        >>> callbacks = self.get_callback_entries()
    """
    # Delegates to the callbacks subsystem.
    return await self.AD.callbacks.get_callback_entries()
@utils.sync_wrapper
async def depends_on_module(self, *modules):
    """Registers a global_modules dependency for an app.

    Args:
        *modules: Modules to register a dependency on.

    Returns:
        None.

    Examples:
        >>> import somemodule
        >>> import anothermodule
        >>> # later
        >>> self.depends_on_module(somemodule, anothermodule)
    """
    return await self.AD.app_management.register_module_dependency(
        self.name,
        *modules
    )
<|code_end|>
appdaemon/events.py
<|code_start|>"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
    """Constructor.

    Args:
        ad: Reference to the AppDaemon object
    """
    self.AD = ad  # core AppDaemon object (scheduler, state, callbacks, ...)
    self.logger = ad.logging.get_child("_events")  # subsystem logger
#
# Events
#
async def add_event_callback(self, __name, namespace, cb, event, **kwargs):
    """Adds a callback for an event which is called internally by apps.

    Args:
        __name (str): Name of the app.
        namespace (str): Namespace of the event.
        cb: Callback function.
        event (str): Name of the event.
        **kwargs: List of values to filter on, and additional arguments to pass to the callback.

    Returns:
        ``None`` or the reference to the callback handle.
    """
    # Only register when the requested thread pin is valid.
    if self.AD.threading.validate_pin(__name, kwargs) is True:
        # Resolve pinning: explicit kwargs override the app's defaults.
        if "pin" in kwargs:
            # NOTE(review): the guard tests "pin" but reads "pin_app" --
            # a caller passing pin without pin_app would KeyError here;
            # confirm callers always supply both.
            pin_app = kwargs["pin_app"]
        else:
            pin_app = self.AD.app_management.objects[__name]["pin_app"]
        if "pin_thread" in kwargs:
            pin_thread = kwargs["pin_thread"]
            # An explicit thread implies pinning.
            pin_app = True
        else:
            pin_thread = self.AD.app_management.objects[__name]["pin_thread"]
        if __name not in self.AD.callbacks.callbacks:
            self.AD.callbacks.callbacks[__name] = {}
        # Opaque handle used for cancellation/introspection.
        handle = uuid.uuid4().hex
        self.AD.callbacks.callbacks[__name][handle] = {
            "name": __name,
            "id": self.AD.app_management.objects[__name]["id"],
            "type": "event",
            "function": cb,
            "namespace": namespace,
            "event": event,
            "pin_app": pin_app,
            "pin_thread": pin_thread,
            "kwargs": kwargs
        }
        # Optional auto-expiry: schedule removal of this callback.
        if "timeout" in kwargs:
            exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
            kwargs["__timeout"] = await self.AD.sched.insert_schedule(
                __name, exec_time, None, False, None, __event_handle=handle,
            )
        # Mirror the registration into the admin namespace for the UI.
        await self.AD.state.add_entity("admin", "event_callback.{}".format(handle), "active",
                                       {"app": __name, "event_name": event, "function": cb.__name__,
                                        "pinned": pin_app, "pinned_thread": pin_thread, "fired": 0,
                                        "executed": 0, "kwargs": kwargs})
        return handle
    else:
        return None
async def cancel_event_callback(self, name, handle):
    """Cancel a registered event callback.

    Args:
        name (str): Name of the app or module.
        handle: Handle returned when the callback was registered.

    Returns:
        None.
    """
    registry = self.AD.callbacks.callbacks
    if name in registry and handle in registry[name]:
        del registry[name][handle]
        # Drop the mirrored admin entity as well.
        await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
    # Remove the app's bucket entirely once it is empty.
    if name in registry and registry[name] == {}:
        del registry[name]
async def info_event_callback(self, name, handle):
    """Return the event name and kwargs registered for a callback.

    Args:
        name (str): Name of the app or subsystem.
        handle: Handle returned when the callback was registered.

    Returns:
        A ``(event, kwargs)`` tuple for the callback.

    Raises:
        ValueError: If the handle is unknown.
    """
    registry = self.AD.callbacks.callbacks
    if name not in registry or handle not in registry[name]:
        raise ValueError("Invalid handle: {}".format(handle))
    entry = registry[name][handle]
    return entry["event"], entry["kwargs"].copy()
async def fire_event(self, namespace, event, **kwargs):
    """Fire an event, delegating to the namespace's plugin when one exists.

    A namespace without a plugin fires the event locally. With a plugin,
    firing is delegated to it on the understanding that the plugin will
    notify AppDaemon when the event actually occurs, usually via the
    system it is communicating with.

    Args:
        namespace (str): Namespace for the event to be fired in.
        event (str): Name of the event.
        **kwargs: Arguments to associate with the event.

    Returns:
        None.
    """
    self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
    plugin = await self.AD.plugins.get_plugin_object(namespace)
    if not hasattr(plugin, "fire_plugin_event"):
        # No plugin transport -- dispatch the event locally.
        await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
    else:
        # The plugin is expected to echo the event back to AppDaemon.
        await plugin.fire_plugin_event(event, namespace, **kwargs)
async def process_event(self, namespace, data):
    """Processes an event that has been received either locally or from a plugin.

    Args:
        namespace (str): Namespace the event was fired in.
        data: Data associated with the event.

    Returns:
        None.
    """
    try:
        self.logger.debug("Event type:%s:", data['event_type'])
        self.logger.debug(data["data"])
        # Kick the scheduler so it updates its clock for time travel
        if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
            await self.AD.sched.kick()
        if data['event_type'] == "state_changed":
            if 'entity_id' in data['data'] and 'new_state' in data['data']:
                entity_id = data['data']['entity_id']
                self.AD.state.set_state_simple(namespace, entity_id, data['data']['new_state'])
                if self.AD.apps is True and namespace != "admin":
                    await self.AD.state.process_state_callbacks(namespace, data)
            else:
                self.logger.warning("Malformed 'state_changed' event: %s", data['data'])
                return
        if self.AD.apps is True:  # and namespace != "admin":
            # Process callbacks
            await self.process_event_callbacks(namespace, data)
        #
        # Send to the stream
        #
        if self.AD.http is not None:
            if data["event_type"] == "state_changed":
                if data["data"]["new_state"] == data["data"]["old_state"]:
                    # Nothing changed so don't send
                    return
            # take a copy without "ts" if present, as it breaks deepcopy and JSON
            if "ts" in data["data"]:
                ts = data["data"].pop("ts")
                mydata = deepcopy(data)
                data["data"]["ts"] = ts
            else:
                mydata = deepcopy(data)
            await self.AD.http.stream_update(namespace, mydata)
    except Exception:
        # Narrowed from a bare `except:` so CancelledError and
        # KeyboardInterrupt are no longer silently swallowed here.
        self.logger.warning('-' * 60)
        self.logger.warning("Unexpected error during process_event()")
        self.logger.warning('-' * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning('-' * 60)
def has_log_callback(self, name):
    """Return ``True`` if the app has a log callback, ``False`` otherwise.

    Used to prevent callback loops: in the calling logic, a ``True``
    result suppresses the resulting logging event.

    Args:
        name (str): Name of the app.

    Returns:
        bool: Whether *name* has a registered ``__AD_LOG_EVENT`` callback.
    """
    if name == "AppDaemon._stream":
        return True
    callbacks = self.AD.callbacks.callbacks
    for callback in callbacks:
        # Renamed loop var from "uuid" to avoid shadowing the uuid module
        for uuid_ in callbacks[callback]:
            cb = callbacks[callback][uuid_]
            # Return at the first match instead of scanning every callback
            if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
                return True
    return False
async def process_event_callbacks(self, namespace, data):
    """Processes a pure event callback.

    Locate any callbacks that may be registered for this event, check for
    filters and if appropriate, dispatch the event for further checking
    and eventual action.

    Args:
        namespace (str): Namespace of the event.
        data: Data associated with the event.

    Returns:
        None.
    """
    self.logger.debug("process_event_callbacks() %s %s", namespace, data)
    # Check for log callbacks and exit to prevent loops
    if data["event_type"] == "__AD_LOG_EVENT":
        if self.has_log_callback(data["data"]["app_name"]):
            self.logger.debug("Discarding event for loop avoidance")
            return
    for name in self.AD.callbacks.callbacks.keys():
        for uuid_ in self.AD.callbacks.callbacks[name]:
            callback = self.AD.callbacks.callbacks[name][uuid_]
            # A callback matches its own namespace, or when either the
            # callback or the event uses the special "global" namespace
            if callback["namespace"] == namespace or callback[
                    "namespace"] == "global" or namespace == "global":
                #
                # Check for either a blank event (for all events)
                # Or the event is a match
                # But don't allow a global listen for any system events (events that start with __)
                #
                if "event" in callback and (
                        (callback["event"] is None and data['event_type'][:2] != "__")
                        or data['event_type'] == callback["event"]):
                    # Check any filters: every kwarg that also appears in the
                    # event data must match, otherwise the callback is skipped
                    _run = True
                    for key in callback["kwargs"]:
                        if key in data["data"] and callback["kwargs"][key] != \
                                data["data"][key]:
                            _run = False
                    if data["event_type"] == "__AD_LOG_EVENT":
                        # The "log" filter matches against the log type,
                        # not against a key in the event data
                        if "log" in callback["kwargs"] and callback["kwargs"]["log"] != data["data"]["log_type"]:
                            _run = False
                    if _run:
                        # Only dispatch to apps that are actually loaded
                        if name in self.AD.app_management.objects:
                            await self.AD.threading.dispatch_worker(name,
                                                                    {
                                                                        "id": uuid_,
                                                                        "name": name,
                                                                        "objectid": self.AD.app_management.objects[name]["id"],
                                                                        "type": "event",
                                                                        "event": data['event_type'],
                                                                        "function": callback["function"],
                                                                        "data": data["data"],
                                                                        "pin_app": callback["pin_app"],
                                                                        "pin_thread": callback["pin_thread"],
                                                                        "kwargs": callback["kwargs"]
                                                                    })
async def event_services(self, namespace, domain, service, kwargs):
    """Service handler for the "fire_event" service call.

    Extracts the "event" key from *kwargs* and fires that event with the
    remaining arguments; logs a warning when no event name was supplied.
    """
    if "event" not in kwargs:
        self.logger.warning("Malformed 'fire_event' service call, as no event given")
        return
    event = kwargs.pop("event")
    await self.fire_event(namespace, event, **kwargs)
<|code_end|>
| appdaemon/adapi.py
<|code_start|>import asyncio
import datetime
import inspect
import iso8601
import re
from datetime import timedelta
from copy import deepcopy
# needed for fake coro cb that looks like scheduler
import uuid
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class ADAPI:
"""AppDaemon API class.
This class includes all native API calls to AppDaemon
"""
#
# Internal parameters
#
def __init__(self, ad: AppDaemon, name, logging_obj, args, config, app_config, global_vars):
    """Initialize the API object for a single app.

    Args:
        ad (AppDaemon): The central AppDaemon object.
        name (str): Name of the app this API instance belongs to.
        logging_obj: AppDaemon logging subsystem object.
        args (dict): The app's own configuration arguments.
        config: AppDaemon configuration.
        app_config: The full app configuration.
        global_vars: Object shared between all apps.
    """
    # Store args
    self.AD = ad
    self.name = name
    self._logging = logging_obj
    self.config = config
    self.app_config = app_config
    # Deep copy so the app cannot mutate the shared config structure
    self.args = deepcopy(args)
    self.app_dir = self.AD.app_dir
    self.config_dir = self.AD.config_dir
    self.dashboard_dir = self.AD.http.dashboard_dir
    self.global_vars = global_vars
    # Default namespace used when calls don't pass namespace= explicitly
    self._namespace = "default"
    self.logger = self._logging.get_child(name)
    self.err = self._logging.get_error().getChild(name)
    # Cache of lazily-built user-defined loggers, keyed by log name
    self.user_logs = {}
    if "log_level" in args:
        self.logger.setLevel(args["log_level"])
        self.err.setLevel(args["log_level"])
    if "log" in args:
        # Redirect the app's main logger to a user-defined log if configured
        userlog = self.get_user_log(args["log"])
        if userlog is not None:
            self.logger = userlog
    # Dialogflow API version detected by get_dialogflow_intent() (default V2)
    self.dialogflow_v = 2
@staticmethod
def _sub_stack(msg):
# If msg is a data structure of some type, don't sub
if type(msg) is str:
stack = inspect.stack()
if msg.find("__module__") != -1:
msg = msg.replace("__module__", stack[2][1])
if msg.find("__line__") != -1:
msg = msg.replace("__line__", str(stack[2][2]))
if msg.find("__function__") != -1:
msg = msg.replace("__function__", stack[2][3])
return msg
def _get_namespace(self, **kwargs):
if "namespace" in kwargs:
namespace = kwargs["namespace"]
del kwargs["namespace"]
else:
namespace = self._namespace
return namespace
#
# Logging
#
def _log(self, logger, msg, *args, **kwargs):
#
# Internal
#
if "level" in kwargs:
level = kwargs.pop("level", "INFO")
else:
level = "INFO"
ascii_encode = kwargs.pop("ascii_encode", True)
if ascii_encode is True:
safe_enc = lambda s: str(s).encode("utf-8", "replace").decode("ascii", "replace")
msg = safe_enc(msg)
logger.log(self._logging.log_levels[level], msg, *args, **kwargs)
def log(self, msg, *args, **kwargs):
"""Logs a message to AppDaemon's main logfile.
Args:
msg (str): The message to log.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
level (str, optional): The log level of the message - takes a string representing the
standard logger levels (Default: ``"WARNING"``).
ascii_encode (bool, optional): Switch to disable the encoding of all log messages to
ascii. Set this to true if you want to log UTF-8 characters (Default: ``True``).
log (str, optional): Send the message to a specific log, either system or user_defined.
System logs are ``main_log``, ``error_log``, ``diag_log`` or ``access_log``.
Any other value in use here must have a corresponding user-defined entity in
the ``logs`` section of appdaemon.yaml.
stack_info (bool, optional): If ``True`` the stack info will included.
Returns:
None.
Examples:
Log a message to the main logfile of the system.
>>> self.log("Log Test: Parameter is %s", some_variable)
Log a message to the specified logfile.
>>> self.log("Log Test: Parameter is %s", some_variable, log="test_log")
Log a message with error-level to the main logfile of the system.
>>> self.log("Log Test: Parameter is %s", some_variable, level = "ERROR")
Log a message using `placeholders` to the main logfile of the system.
>>> self.log("Line: __line__, module: __module__, function: __function__, Msg: Something bad happened")
Log a WARNING message (including the stack info) to the main logfile of the system.
>>> self.log("Stack is", some_value, level="WARNING", stack_info=True)
"""
if "log" in kwargs:
# Its a user defined log
logger = self.get_user_log(kwargs["log"])
kwargs.pop("log")
else:
logger = self.logger
msg = self._sub_stack(msg)
self._log(logger, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
    """Logs a message to AppDaemon's error logfile.

    Args:
        msg (str): The message to log.
        *args: Arguments for the %-format string.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        level (str, optional): The log level of the message - takes a string
            representing the standard logger levels.
        ascii_encode (bool, optional): Switch to disable the encoding of all
            log messages to ascii. Set this to true if you want to log UTF-8
            characters (Default: ``True``).

    Returns:
        None.

    Examples:
        Log an error message to the error logfile of the system.

        >>> self.error("Some Warning string")

        Log an error message with critical-level to the error logfile of the system.

        >>> self.error("Some Critical string", level = "CRITICAL")
    """
    # Unlike log(), no __module__/__line__ placeholder substitution and no
    # "log" redirection - everything goes straight to the error logger.
    self._log(self.err, msg, *args, **kwargs)
@utils.sync_wrapper
async def listen_log(self, callback, level="INFO", **kwargs):
    """Registers the App to receive a callback every time an App logs a message.

    Args:
        callback (function): Function to be called when a message is logged.
        level (str): Logging level to be used - lower levels will not be
            forwarded to the app (Default: ``"INFO"``).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        log (str, optional): Name of the log to listen to, default is all logs.
            The name should be one of the 4 built in types ``main_log``,
            ``error_log``, ``diag_log`` or ``access_log`` or a user defined
            log entry.
        pin (bool, optional): If True, the callback will be pinned to a
            particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        namespace (str, optional): Namespace for the callback
            (Default: ``"admin"``).

    Returns:
        A unique identifier that can be used to cancel the callback if
        required. It is recommended that handles are stored in the object
        namespace, e.g., self.handle.

    Examples:
        >>> self.handle = self.listen_log(self.cb, "WARNING")
        >>> self.handle = self.listen_log(self.cb, "WARNING", log="main_log")
        >>> self.handle = self.listen_log(self.cb, "WARNING", log="my_custom_log")
    """
    # Log callbacks live in the "admin" namespace unless overridden
    namespace = kwargs.pop("namespace", "admin")
    return await self.AD.logging.add_log_callback(namespace, self.name, callback, level, **kwargs)
@utils.sync_wrapper
async def cancel_listen_log(self, handle):
    """Cancels the log callback for the App.

    Args:
        handle: The handle returned when the `listen_log` call was made.

    Returns:
        None.

    Examples:
        >>> self.cancel_listen_log(handle)
    """
    self.logger.debug("Canceling listen_log for %s", self.name)
    await self.AD.logging.cancel_log_callback(self.name, handle)
def get_main_log(self):
    """Returns the underlying logger object used for the main log.

    Returns:
        The app's main ``logging.Logger`` object (possibly redirected to a
        user-defined log if the app was configured with a ``log`` argument).

    Examples:
        Log a critical message to the `main` logfile of the system.

        >>> log = self.get_main_log()
        >>> log.critical("Log a critical error")
    """
    return self.logger
def get_error_log(self):
    """Returns the underlying logger object used for the error log.

    Returns:
        The app's error ``logging.Logger`` object.

    Examples:
        Log an error message to the `error` logfile of the system.

        >>> error_log = self.get_error_log()
        >>> error_log.error("Log an error", stack_info=True, exc_info=True)
    """
    return self.err
def get_user_log(self, log):
    """Return the logger object for the user-defined log *log*.

    Loggers are built lazily and cached in ``self.user_logs``; the app's
    configured ``log_level`` (if any) is applied when a logger is first
    created. Returns None when the log is not defined in the system.

    Args:
        log (str): Name of the log, as described in the ``logs`` section
            of ``appdaemon.yaml``.

    Examples:
        >>> log = self.get_user_log("test_log")
        >>> log.error("Log an error", stack_info=True, exc_info=True)
    """
    cached = self.user_logs.get(log)
    if cached is not None:
        return cached
    # Not cached yet - build it on the fly from the system-level parent
    parent = self.AD.logging.get_user_log(self, log)
    if parent is None:
        return None
    logger = parent.getChild(self.name)
    self.user_logs[log] = logger
    if "log_level" in self.args:
        logger.setLevel(self.args["log_level"])
    return logger
def set_log_level(self, level):
"""Sets a specific log level for the App.
Args:
level (str): Log level.
Returns:
None.
Notes:
Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
``DEBUG``, ``NOTSET``.
Examples:
>>> self.set_log_level("DEBUG")
"""
self.logger.setLevel(self._logging.log_levels[level])
self.err.setLevel(self._logging.log_levels[level])
for log in self.user_logs:
self.user_logs[log].setLevel(self._logging.log_levels[level])
def set_error_level(self, level):
    """Sets the log level to send to the `error` logfile of the system.

    Args:
        level (str): Error level.

    Returns:
        None.

    Notes:
        Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
        ``DEBUG``, ``NOTSET``.
    """
    # Only the error logger is affected; main and user logs are untouched
    self.err.setLevel(self._logging.log_levels[level])
#
# Threading
#
@utils.sync_wrapper
async def set_app_pin(self, pin):
    """Sets an App to be pinned or unpinned.

    Args:
        pin (bool): Sets whether the App becomes pinned or not.

    Returns:
        None.

    Examples:
        The following line should be put inside the `initialize()` function.

        >>> self.set_app_pin(True)
    """
    await self.AD.threading.set_app_pin(self.name, pin)
@utils.sync_wrapper
async def get_app_pin(self):
    """Finds out if the current App is currently pinned or not.

    Returns:
        bool: ``True`` if the App is pinned, ``False`` otherwise.

    Examples:
        >>> if self.get_app_pin():
        >>>     self.log("App pinned!")
    """
    return await self.AD.threading.get_app_pin(self.name)
@utils.sync_wrapper
async def set_pin_thread(self, thread):
    """Sets the thread that the App will be pinned to.

    Args:
        thread (int): Number of the thread to pin to. Threads start at 0 and
            go up to the number of threads specified in ``appdaemon.yaml`` -1.

    Returns:
        The result of the underlying threading call.

    Examples:
        The following line should be put inside the `initialize()` function.

        >>> self.set_pin_thread(5)
    """
    return await self.AD.threading.set_pin_thread(self.name, thread)
@utils.sync_wrapper
async def get_pin_thread(self):
    """Finds out which thread the App is pinned to.

    Returns:
        int: The thread number or -1 if the App is not pinned.

    Examples:
        >>> thread = self.get_pin_thread()
        >>> self.log(f"I'm pinned to thread: {thread}")
    """
    return await self.AD.threading.get_pin_thread(self.name)
#
# Namespace
#
def set_namespace(self, namespace):
"""Sets a new namespace for the App to use from that point forward.
Args:
namespace (str): Name of the new namespace
Returns:
None.
Examples:
>>> self.set_namespace("hass1")
"""
self._namespace = namespace
def get_namespace(self):
    """Returns the App's current default namespace (str)."""
    return self._namespace
@utils.sync_wrapper
async def list_namespaces(self):
    """Returns a list of available namespaces.

    Examples:
        >>> self.list_namespaces()
    """
    return await self.AD.state.list_namespaces()
@utils.sync_wrapper
async def save_namespace(self, **kwargs):
    """Persist the entities of a user-defined namespace to a file.

    When AD restarts, the saved entities are reloaded with their previous
    states - a basic form of non-volatile storage. Depending on the
    namespace configuration, saving can also happen automatically or at
    shutdown; this call triggers it manually.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__ for a
            detailed description. In most cases it is safe to ignore this
            parameter.

    Returns:
        None.

    Examples:
        >>> self.save_namespace()
    """
    await self.AD.state.save_namespace(self._get_namespace(**kwargs))
#
# Utility
#
@utils.sync_wrapper
async def get_app(self, name):
    """Gets the instantiated object of another app running within the system.

    This is useful for calling functions or accessing variables that reside
    in different apps without requiring duplication of code.

    Args:
        name (str): Name of the app required. This is the name specified in
            the header section of the config file, not the module or class.

    Returns:
        An object reference to the class.

    Examples:
        >>> MyApp = self.get_app("MotionLights")
        >>> MyApp.turn_light_on()
    """
    return await self.AD.app_management.get_app(name)
@utils.sync_wrapper
async def _check_entity(self, namespace, entity):
    """Validate an entity id and warn if it is unknown in *namespace*.

    Raises:
        ValueError: If *entity* is not a fully qualified
            ``<device>.<entity>`` id.
    """
    if "." not in entity:
        raise ValueError(
            "{}: Invalid entity ID: {}".format(self.name, entity))
    exists = await self.AD.state.entity_exists(namespace, entity)
    if not exists:
        # Unknown entities are only warned about, not treated as errors
        self.logger.warning("%s: Entity %s not found in namespace %s", self.name, entity, namespace)
@staticmethod
def get_ad_version():
    """Returns a string with the current version of AppDaemon.

    Examples:
        >>> version = self.get_ad_version()
    """
    return utils.__version__
@utils.sync_wrapper
async def entity_exists(self, entity_id, **kwargs):
    """Check whether *entity_id* exists in the selected namespace.

    With multiple Home Assistant instances, pass ``namespace`` to check
    the right one; AppDaemon-local entities can be checked the same way.

    Args:
        entity_id (str): Fully qualified entity id (including device type).

    Keyword Args:
        namespace (str, optional): Namespace to use for the call; defaults
            to the app's current namespace.

    Returns:
        bool: ``True`` if the entity id exists, ``False`` otherwise.

    Examples:
        >>> if self.entity_exists("light.living_room"):
        >>>     pass  # do something
        >>> if self.entity_exists("mqtt.security_settings", namespace="mqtt"):
        >>>     pass  # do something
    """
    return await self.AD.state.entity_exists(self._get_namespace(**kwargs), entity_id)
@utils.sync_wrapper
async def split_entity(self, entity_id, **kwargs):
    """Split a fully qualified entity id into device and entity parts.

    E.g. ``light.hall_light`` becomes ``["light", "hall_light"]``. The
    entity is validated first (a warning is logged if it doesn't exist).

    Args:
        entity_id (str): Fully qualified entity id (including device type).

    Keyword Args:
        namespace (str, optional): Namespace to use for the call; defaults
            to the app's current namespace.

    Returns:
        A list with 2 entries: the device and the entity name.

    Examples:
        >>> device, entity = self.split_entity(entity_id)
        >>> if device == "scene":
        >>>     pass  # do something specific to scenes
    """
    namespace = self._get_namespace(**kwargs)
    await self._check_entity(namespace, entity_id)
    return entity_id.split(".")
@utils.sync_wrapper
async def remove_entity(self, entity_id, **kwargs):
    """Permanently delete an entity created within a namespace.

    Args:
        entity_id (str): Fully qualified entity id (including device type).

    Keyword Args:
        namespace (str, optional): Namespace to use for the call; defaults
            to the app's current namespace.

    Returns:
        None.

    Examples:
        >>> self.remove_entity('sensor.living_room')
        >>> self.remove_entity('mqtt.living_room_temperature', namespace = 'mqtt')
    """
    await self.AD.state.remove_entity(self._get_namespace(**kwargs), entity_id)
    return None
@staticmethod
def split_device_list(devices):
"""Converts a comma-separated list of device types to an iterable list.
This is intended to assist in use cases where the App takes a list of
entities from an argument, e.g., a list of sensors to monitor. If only
one entry is provided, an iterable list will still be returned to avoid
the need for special processing.
Args:
devices (str): A comma-separated list of devices to be split (without spaces).
Returns:
A list of split devices with 1 or more entries.
Examples:
>>> for sensor in self.split_device_list(self.args["sensors"]):
>>> #do something for each sensor, e.g., make a state subscription
"""
return devices.split(",")
@utils.sync_wrapper
async def get_plugin_config(self, **kwargs):
    """Return any useful metadata the plugin makes available.

    For the HASS plugin this is the Home Assistant configuration data
    (latitude, longitude, etc., per its ``/api/config`` endpoint).

    Keyword Args:
        namespace (str): Namespace of the plugin whose metadata is wanted;
            defaults to the app's current namespace.

    Returns:
        A dictionary of plugin configuration information.

    Examples:
        >>> config = self.get_plugin_config()
        >>> self.log(f'My current position is {config["latitude"]}(Lat), {config["longitude"]}(Long)')
    """
    return await self.AD.plugins.get_plugin_meta(self._get_namespace(**kwargs))
@utils.sync_wrapper
async def friendly_name(self, entity_id, **kwargs):
    """Return the friendly name of an entity.

    Args:
        entity_id (str): Fully qualified entity id (including device type).

    Keyword Args:
        namespace (str, optional): Namespace to use for the call; defaults
            to the app's current namespace.

    Returns:
        str: The entity's ``friendly_name`` attribute if set, the entity
        id itself if not, or None when the entity is not in the state.

    Examples:
        >>> tracker = "device_tracker.andrew"
        >>> friendly_name = self.friendly_name(tracker)
    """
    await self._check_entity(self._get_namespace(**kwargs), entity_id)
    state = await self.get_state(**kwargs)
    if entity_id not in state:
        return None
    # Fall back to the raw entity id when no friendly_name attribute is set
    return state[entity_id]["attributes"].get("friendly_name", entity_id)
@utils.sync_wrapper
async def set_production_mode(self, mode=True):
    """Activate or deactivate AppDaemon's production mode.

    Args:
        mode (bool): ``True`` activates production mode, ``False``
            deactivates it (default ``True``).

    Returns:
        The mode that was applied, or ``None`` when *mode* is not a bool.
    """
    if not isinstance(mode, bool):
        # Reject non-boolean input rather than guessing at intent
        self.logger.warning("%s not a valid parameter for Production Mode", mode)
        return None
    await self.AD.utility.set_production_mode(mode)
    return mode
#
# Internal Helper functions
#
def start_app(self, app, **kwargs):
    """Start an app via the appdaemon "app/start" service.

    Cannot start an app that is disabled in the App Config; it only runs
    the app's ``initialize()``, without reloading class or config changes.

    Args:
        app (str): Name of the app.
        **kwargs (optional): Extra arguments forwarded to the service call.

    Returns:
        None.

    Examples:
        >>> self.start_app("lights_app")
    """
    kwargs.update(app=app, namespace="appdaemon")
    self.call_service("app/start", **kwargs)
    return None
def stop_app(self, app, **kwargs):
    """Stop a running app via the appdaemon "app/stop" service.

    Args:
        app (str): Name of the app.
        **kwargs (optional): Extra arguments forwarded to the service call.

    Returns:
        None.

    Examples:
        >>> self.stop_app("lights_app")
    """
    kwargs.update(app=app, namespace="appdaemon")
    self.call_service("app/stop", **kwargs)
    return None
def restart_app(self, app, **kwargs):
    """Restart an app (running or not) via the appdaemon "app/restart" service.

    Args:
        app (str): Name of the app.
        **kwargs (optional): Extra arguments forwarded to the service call.

    Returns:
        None.

    Examples:
        >>> self.restart_app("lights_app")
    """
    kwargs.update(app=app, namespace="appdaemon")
    self.call_service("app/restart", **kwargs)
    return None
def reload_apps(self, **kwargs):
    """Reload apps whose .yaml or .py files have changed.

    Useful in production mode, where changed apps are not reloaded
    automatically.

    Args:
        **kwargs (optional): Extra arguments forwarded to the service call.

    Returns:
        None.

    Examples:
        >>> self.reload_apps()
    """
    kwargs.update(namespace="appdaemon")
    self.call_service("app/reload", **kwargs)
    return None
#
# Dialogflow
#
def get_dialogflow_intent(self, data):
"""Gets the intent's action from the Google Home response.
Args:
data: Response received from Google Home.
Returns:
A string representing the Intent from the interaction model that was requested,
or ``None``, if no action was received.
Examples:
>>> intent = ADAPI.get_dialogflow_intent(data)
"""
if "result" in data and "action" in data["result"]:
self.dialogflow_v = 1
return data["result"]["action"]
elif "queryResult" in data and "action" in data["queryResult"]:
self.dialogflow_v = 2
return data["queryResult"]["action"]
else:
return None
@staticmethod
def get_dialogflow_slot_value(data, slot=None):
"""Gets slots' values from the interaction model.
Args:
data: Response received from Google Home.
slot (str): Name of the slot. If a name is not specified, all slots will be returned
as a dictionary. If a name is specified but is not found, ``None`` will be returned.
Returns:
A string representing the value of the slot from the interaction model, or a hash of slots.
Examples:
>>> beer_type = ADAPI.get_dialogflow_intent(data, "beer_type")
>>> all_slots = ADAPI.get_dialogflow_intent(data)
"""
if "result" in data:
# using V1 API
contexts = data["result"]["contexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["result"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
elif "queryResult" in data:
# using V2 API
contexts = data["queryResult"]["outputContexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["queryResult"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
else:
return None
def format_dialogflow_response(self, speech=None):
"""Formats a response to be returned to Google Home, including speech.
Args:
speech (str): The text for Google Home to say.
Returns:
None.
Examples:
>>> ADAPI.format_dialogflow_response(speech = "Hello World")
"""
if self.dialogflow_v == 1:
speech = \
{
"speech": speech,
"source": "Appdaemon",
"displayText": speech
}
elif self.dialogflow_v == 2:
speech = \
{
"fulfillmentText": speech,
"source": "Appdaemon"
}
else:
speech = None
return speech
#
# Alexa
#
@staticmethod
def format_alexa_response(speech=None, card=None, title=None):
"""Formats a response to be returned to Alex including speech and a card.
Args:
speech (str): The text for Alexa to say.
card (str): Text for the card.
title (str): Title for the card.
Returns:
None.
Examples:
>>> ADAPI.format_alexa_response(speech = "Hello World", card = "Greetings to the world", title = "Hello")
"""
response = \
{
"shouldEndSession": True
}
if speech is not None:
response["outputSpeech"] = \
{
"type": "PlainText",
"text": speech
}
if card is not None:
response["card"] = \
{
"type": "Simple",
"title": title,
"content": card
}
speech = \
{
"version": "1.0",
"response": response,
"sessionAttributes": {}
}
return speech
@staticmethod
def get_alexa_error(data):
"""Gets the error message from the Alexa API response.
Args:
data: Response received from the Alexa API .
Returns:
A string representing the value of message, or ``None`` if no error message was received.
"""
if "request" in data and "err" in data["request"] and "message" in data["request"]["err"]:
return data["request"]["err"]["message"]
else:
return None
@staticmethod
def get_alexa_intent(data):
"""Gets the Intent's name from the Alexa response.
Args:
data: Response received from Alexa.
Returns:
A string representing the Intent's name from the interaction model that was requested,
or ``None``, if no Intent was received.
Examples:
>>> intent = ADAPI.get_alexa_intent(data)
"""
if "request" in data and "intent" in data["request"] and "name" in data["request"]["intent"]:
return data["request"]["intent"]["name"]
else:
return None
@staticmethod
def get_alexa_slot_value(data, slot=None):
"""Gets values for slots from the interaction model.
Args:
data: The request data received from Alexa.
slot: Name of the slot. If a name is not specified, all slots will be returned as
a dictionary. If a name is specified but is not found, None will be returned.
Returns:
A ``string`` representing the value of the slot from the interaction model, or a ``hash`` of slots.
Examples:
>>> beer_type = ADAPI.get_alexa_intent(data, "beer_type")
>>> all_slots = ADAPI.get_alexa_intent(data)
"""
if "request" in data and \
"intent" in data["request"] and \
"slots" in data["request"]["intent"]:
if slot is None:
return data["request"]["intent"]["slots"]
else:
if slot in data["request"]["intent"]["slots"] and \
"value" in data["request"]["intent"]["slots"][slot]:
return data["request"]["intent"]["slots"][slot]["value"]
else:
return None
else:
return None
#
# API
#
@utils.sync_wrapper
async def register_endpoint(self, callback, name=None):
    """Register an HTTP API endpoint routed to this App.

    Args:
        callback: Function called when a request hits the endpoint. It
            should return a (possibly empty) string and an HTTP OK status
            (e.g. 200); omitting the response causes an error per request.
        name (str, optional): Endpoint name; defaults to the app's name.

    Returns:
        A handle usable with unregister_endpoint(), or None when the
        HTTP component is not configured.

    Examples:
        >>> self.register_endpoint(my_callback)
        >>> self.register_endpoint(alexa_cb, "alexa")
    """
    ep = self.name if name is None else name
    if self.AD.http is not None:
        return await self.AD.http.register_endpoint(callback, ep)
    # Fixed log message typo: "filed" -> "failed"
    self.logger.warning("register_endpoint for %s failed - HTTP component is not configured", name)
    return None
@utils.sync_wrapper
async def unregister_endpoint(self, handle):
    """Remove a previously registered endpoint.

    Args:
        handle: A handle returned by a previous call to ``register_endpoint``.

    Returns:
        None.

    Examples:
        >>> self.unregister_endpoint(handle)
    """
    await self.AD.http.unregister_endpoint(handle, self.name)
#
# State
#
@utils.sync_wrapper
async def listen_state(self, callback, entity=None, **kwargs):
"""Registers a callback to react to state changes.
This function allows the user to register a callback for a wide variety of state changes.
Args:
callback: Function to be invoked when the requested state change occurs. It must conform
to the standard State Callback format documented `here <APPGUIDE.html#state-callbacks>`__
entity (str, optional): name of an entity or device type. If just a device type is provided,
e.g., `light`, or `binary_sensor`. ``listen_state()`` will subscribe to state changes of all
devices of that type. If a fully qualified entity_id is provided, ``listen_state()`` will
listen for state changes for just that entity.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
attribute (str, optional): Name of an attribute within the entity state object. If this
parameter is specified in addition to a fully qualified ``entity_id``. ``listen_state()``
will subscribe to changes for just that attribute within that specific entity.
The ``new`` and ``old`` parameters in the callback function will be provided with
a single value representing the attribute.
The value ``all`` for attribute has special significance and will listen for any
state change within the specified entity, and supply the callback functions with
the entire state dictionary for the specified entity rather than an individual
attribute value.
new (optional): If ``new`` is supplied as a parameter, callbacks will only be made if the
state of the selected attribute (usually state) in the new state match the value
of ``new``.
old (optional): If ``old`` is supplied as a parameter, callbacks will only be made if the
state of the selected attribute (usually state) in the old state match the value
of ``old``.
duration (int, optional): If ``duration`` is supplied as a parameter, the callback will not
fire unless the state listened for is maintained for that number of seconds. This
requires that a specific attribute is specified (or the default of ``state`` is used),
and should be used in conjunction with the ``old`` or ``new`` parameters, or both. When
the callback is called, it is supplied with the values of ``entity``, ``attr``, ``old``,
and ``new`` that were current at the time the actual event occurred, since the assumption
is that none of them have changed in the intervening period.
If you use ``duration`` when listening for an entire device type rather than a specific
entity, or for all state changes, you may get unpredictable results, so it is recommended
that this parameter is only used in conjunction with the state of specific entities.
timeout (int, optional): If ``timeout`` is supplied as a parameter, the callback will be created as normal,
but after ``timeout`` seconds, the callback will be removed. If activity for the listened state has
occurred that would trigger a duration timer, the duration timer will still be fired even though the
callback has been deleted.
immediate (bool, optional): It enables the countdown for a delay parameter to start
at the time, if given. If the ``duration`` parameter is not given, the callback runs immediately.
What this means is that after the callback is registered, rather than requiring one or more
state changes before it runs, it immediately checks the entity's states based on given
parameters. If the conditions are right, the callback runs immediately at the time of
registering. This can be useful if, for instance, you want the callback to be triggered
immediately if a light is already `on`, or after a ``duration`` if given.
If ``immediate`` is in use, and ``new`` and ``duration`` are both set, AppDaemon will check
if the entity is already set to the new state and if so it will start the clock
immediately. If ``new`` and ``duration`` are not set, ``immediate`` will trigger the callback
immediately and report in its callback the new parameter as the present state of the
entity. If ``attribute`` is specified, the state of the attribute will be used instead of
state. In these cases, ``old`` will be ignored and when the callback is triggered, its
state will be set to ``None``.
oneshot (bool, optional): If ``True``, the callback will be automatically cancelled
after the first state change that results in a callback.
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description. In most cases,
it is safe to ignore this parameter. The value ``global`` for namespace has special
significance and means that the callback will listen to state updates from any plugin.
pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
pin_thread (int, optional): Sets which thread from the worker pool the callback will be
run by (0 - number of threads -1).
*kwargs (optional): Zero or more keyword arguments that will be supplied to the callback
when it is called.
Notes:
The ``old`` and ``new`` args can be used singly or together.
Returns:
A unique identifier that can be used to cancel the callback if required. Since variables
created within object methods are local to the function they are created in, and in all
likelihood, the cancellation will be invoked later in a different function, it is
recommended that handles are stored in the object namespace, e.g., `self.handle`.
Examples:
Listen for any state change and return the state attribute.
>>> self.handle = self.listen_state(self.my_callback)
Listen for any state change involving a light and return the state attribute.
>>> self.handle = self.listen_state(self.my_callback, "light")
Listen for a state change involving `light.office1` and return the state attribute.
>>> self.handle = self.listen_state(self.my_callback, "light.office_1")
Listen for a state change involving `light.office1` and return the entire state as a dict.
>>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "all")
Listen for a change involving the brightness attribute of `light.office1` and return the
brightness attribute.
>>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "brightness")
Listen for a state change involving `light.office1` turning on and return the state attribute.
>>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on")
Listen for a change involving `light.office1` changing from brightness 100 to 200 and return the
brightness attribute.
>>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "brightness", old = "100", new = "200")
Listen for a state change involving `light.office1` changing to state on and remaining on for a minute.
>>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on", duration = 60)
Listen for a state change involving `light.office1` changing to state on and remaining on for a minute
trigger the delay immediately if the light is already on.
>>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on", duration = 60, immediate = True)
"""
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
name = self.name
if entity is not None and "." in entity:
await self._check_entity(namespace, entity)
self.logger.debug("Calling listen_state for %s", self.name)
return await self.AD.state.add_state_callback(name, namespace, entity, callback, kwargs)
@utils.sync_wrapper
async def cancel_listen_state(self, handle):
    """Cancel a previously registered ``listen_state()`` callback.

    After cancellation the app stops being notified for that particular
    state change; any other registered state callbacks keep working.

    Args:
        handle: The handle returned by the original ``listen_state()`` call.

    Returns:
        None.

    Examples:
        >>> self.cancel_listen_state(self.office_light_handle)
    """
    app_name = self.name
    self.logger.debug("Canceling listen_state for %s", app_name)
    await self.AD.state.cancel_state_callback(handle, app_name)
@utils.sync_wrapper
async def info_listen_state(self, handle):
    """Look up the details of a state callback from its handle.

    Args:
        handle: The handle returned by the original ``listen_state()`` call.

    Returns:
        The ``entity``, ``attribute``, and ``kwargs`` values that were
        supplied when the callback was created.

    Examples:
        >>> entity, attribute, kwargs = self.info_listen_state(self.handle)
    """
    app_name = self.name
    self.logger.debug("Calling info_listen_state for %s", app_name)
    return await self.AD.state.info_state_callback(handle, app_name)
@utils.sync_wrapper
async def get_state(self, entity_id=None, attribute=None, default=None, copy=True, **kwargs):
    """Get the state of any component within Home Assistant.

    States are tracked continuously by AppDaemon (push-based), so this call
    is served locally and never requires a round trip to Home Assistant.

    Args:
        entity_id (str, optional): Entity name or bare device type. A bare
            device type (e.g., ``light``) returns a dict of all matching
            entities keyed by ``entity_id``; a fully qualified entity id
            returns that entity's state attribute (e.g., ``on``/``off``).
        attribute (str, optional): Attribute name within the entity's state
            object. The special value ``all`` returns the entity's entire
            state dictionary instead of a single attribute.
        default (any, optional): Value returned when the requested attribute
            or entity does not exist (default ``None``).
        copy (bool, optional): When ``True`` (the default) a copy of the
            stored state is returned. Setting it to ``False`` hands back
            AppDaemon's internal object — slightly faster, but the caller
            must treat it as strictly read-only.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__; usually
            safe to ignore.

    Returns:
        The requested state. With no parameters, the entire state of Home
        Assistant at that moment: a dictionary with one key per entity,
        each holding the standard entity state information.

    Examples:
        >>> state = self.get_state()
        >>> state = self.get_state("switch")
        >>> state = self.get_state("light.office_1")
        >>> state = self.get_state("light.office_1", attribute="brightness")
        >>> state = self.get_state("light.office_1", attribute="all")
    """
    namespace = self._get_namespace(**kwargs)
    # The namespace has been resolved; don't forward it downstream as well.
    kwargs.pop("namespace", None)
    return await self.AD.state.get_state(
        self.name, namespace, entity_id, attribute, default, copy, **kwargs
    )
@utils.sync_wrapper
async def set_state(self, entity_id, **kwargs):
    """Update the state of the specified entity.

    Args:
        entity_id (str): Fully qualified entity id (including device type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        state: New state value to be set.
        attributes (optional): Entity attributes to be updated.
        namespace (str, optional): Namespace in which to change the entity's
            state; defaults to the last specified or default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__; usually safe to ignore.
        replace (bool, optional): When ``True`` together with ``attributes``,
            AppDaemon replaces the entity's internal attribute register
            entirely instead of merging — useful for dropping stale
            attributes. Only reliable for internal entities; plugins will
            usually overwrite the values on their next update.

    Returns:
        A dictionary representing the updated entity's new state.

    Examples:
        >>> self.set_state("light.office_1", state="off")
        >>> self.set_state("light.office_1", state = "on", attributes = {"color_name": "red"})
        >>> self.set_state("light.office_1", state="off", namespace ="hass")
    """
    self.logger.debug("set state: %s, %s", entity_id, kwargs)
    namespace = self._get_namespace(**kwargs)
    await self._check_entity(namespace, entity_id)
    # Namespace resolved above; strip it before passing kwargs downstream.
    kwargs.pop("namespace", None)
    return await self.AD.state.set_state(self.name, namespace, entity_id, **kwargs)
#
# Service
#
@staticmethod
def _check_service(service):
    """Validate that ``service`` is in ``domain/service`` form.

    Raises:
        ValueError: If the name contains no ``/`` separator.
    """
    if "/" not in service:
        raise ValueError("Invalid Service Name: {}".format(service))
def register_service(self, service, cb, **kwargs):
    """Register a service callable by other apps, the REST API and the Event Stream.

    The registered function becomes available to other apps through
    ``call_service()``, is published as a REST API service, and can be
    invoked via the ``call_service`` event-stream command.

    Args:
        service: Service name in ``domain/service`` format. A missing domain
            is created automatically.
        cb: Function invoked when the service is requested. May be a regular
            or an async function; note that an async callback runs on
            AppDaemon's main loop, so a misbehaving service can delay
            AppDaemon's core functions.

    Returns:
        None

    Examples:
        >>> self.register_service("myservices/service1", mycallback)
    """
    self._check_service(service)
    domain, svc = service.split("/")
    self.logger.debug("register_service: %s/%s, %s", domain, svc, kwargs)
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    self.AD.services.register_service(namespace, domain, svc, cb, __async="auto", **kwargs)
@utils.sync_wrapper
async def call_service(self, service, **kwargs):
    """Call a HASS service from within AppDaemon.

    Any service can be called with its required parameters. Available
    services can be discovered with the developer tools in the UI. For
    listed services the part before the first period is the ``domain`` and
    the part after is the ``service name``; e.g. `light/turn_on` has domain
    `light` and service name `turn_on`.

    Args:
        service (str): The service name in ``domain/service`` format.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        **kwargs: Service-specific parameters as keyword/value pairs, e.g.
            ``entity_id = light.office_1``. These differ per service and can
            be discovered via the developer tools; almost all service calls
            require an ``entity_id``.
        namespace (str, optional): Namespace in which to perform the call;
            defaults to the last specified or default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__; usually safe to ignore.

    Returns:
        None.

    Examples:
        HASS
        >>> self.call_service("light/turn_on", entity_id = "light.office_lamp", color_name = "red")
        >>> self.call_service("notify/notify", title = "Hello", message = "Hello World")
        MQTT
        >>> call_service("mqtt/subscribe", topic="homeassistant/living_room/light", qos=2)
        >>> call_service("mqtt/publish", topic="homeassistant/living_room/light", payload="on")
        Utility
        >>> call_service("app/restart", app="notify_app", namespace="appdaemon")
        >>> call_service("app/stop", app="lights_app", namespace="appdaemon")
        >>> call_service("app/reload", namespace="appdaemon")

        For Utility services the ``namespace`` argument must be set to
        ``appdaemon``, as no app can work within that namespace; omitting
        the namespace raises an error.
    """
    self._check_service(service)
    domain, svc = service.split("/")
    self.logger.debug("call_service: %s/%s, %s", domain, svc, kwargs)
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    # Tag the call with the calling app's name for the service layer.
    kwargs["__name"] = self.name
    return await self.AD.services.call_service(namespace, domain, svc, kwargs)
@utils.sync_wrapper
async def run_sequence(self, sequence, **kwargs):
    """Run an AppDaemon sequence — a series of service calls defined in
    a valid apps.yaml file or supplied inline.

    Args:
        sequence: Either the sequence name referring to an entry in
            apps.yaml, or a dict/list containing the actual commands to run.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call; defaults
            to the last specified or default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__; usually safe to ignore.

    Returns:
        A handle usable with ``cancel_sequence()`` to terminate the script.

    Examples:
        Run a yaml-defined sequence called "sequence.front_room_scene".

        >>> handle = self.run_sequence("sequence.front_room_scene")

        Run an inline sequence.

        >>> handle = self.run_sequence([{"light.turn_on": {"entity_id": "light.office_1"}}, {"sleep": 5}, {"light.turn_off":
        {"entity_id": "light.office_1"}}])
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    app_name = self.name
    self.logger.debug("Calling run_sequence() for %s", app_name)
    return await self.AD.sequences.run_sequence(app_name, namespace, sequence, **kwargs)
@utils.sync_wrapper
async def cancel_sequence(self, handle):
    """Cancels a running AppDaemon sequence.

    Args:
        handle: The handle returned by the `run_sequence()` call.

    Returns:
        None.

    Examples:
        >>> self.cancel_sequence(handle)
    """
    _name = self.name
    # Fixed copy/paste error: this previously logged "Calling run_sequence()".
    self.logger.debug("Calling cancel_sequence() for %s", self.name)
    await self.AD.sequences.cancel_sequence(_name, handle)
#
# Events
#
@utils.sync_wrapper
async def listen_event(self, callback, event=None, **kwargs):
    """Register a callback for a specific event, or for every event.

    Args:
        callback: Function invoked when the event fires. Must conform to the
            standard State Callback format documented
            `here <APPGUIDE.html#state-callbacks>`__.
        event (optional): Event name to subscribe to — a standard Home
            Assistant event such as `service_registered` or an arbitrary
            custom event such as `"MODE_CHANGE"`. When omitted, the callback
            subscribes to all events.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        oneshot (bool, optional): When ``True``, the callback is cancelled
            automatically after its first invocation.
        namespace (str, optional): Namespace for the call; usually safe to
            ignore. The special value ``global`` makes the callback listen
            to updates from any plugin. See
            `namespaces <APPGUIDE.html#namespaces>`__.
        pin (bool, optional): When ``True``, pin the callback to a
            particular thread.
        pin_thread (int, optional): Which worker-pool thread runs the
            callback (0 - number of threads -1).
        timeout (int, optional): When supplied, the callback is created as
            normal but removed after ``timeout`` seconds.
        **kwargs (optional): App-specific keyword/value pairs passed to the
            callback. Keywords matching fields in the event data act as
            filters: the callback only fires when the values match. For
            example a `Minimote` controller emits `zwave.scene_activated`
            with `entity_id` and `scene` data; supplying either keyword
            restricts the callback to matching events. Keywords that match
            no event data are simply ignored. Filtering works for any event
            type — inspect Home Assistant's `logfiles` when the event fires
            to discover the filterable fields.

    Returns:
        A handle that can be used to cancel the callback.

    Examples:
        Listen for all `"MODE_CHANGE"` events.

        >>> self.listen_event(self.mode_event, "MODE_CHANGE")

        Listen for a `minimote` event activating scene 3.

        >>> self.listen_event(self.generic_event, "zwave.scene_activated", scene_id = 3)

        Listen for a `minimote` event activating scene 3 from a specific `minimote`.

        >>> self.listen_event(self.generic_event, "zwave.scene_activated", entity_id = "minimote_31", scene_id = 3)
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    app_name = self.name
    self.logger.debug("Calling listen_event for %s", app_name)
    return await self.AD.events.add_event_callback(app_name, namespace, callback, event, **kwargs)
@utils.sync_wrapper
async def cancel_listen_event(self, handle):
    """Cancel a previously registered event callback.

    Args:
        handle: A handle returned by a previous ``listen_event()`` call.

    Returns:
        None.

    Examples:
        >>> self.cancel_listen_event(handle)
    """
    app_name = self.name
    self.logger.debug("Canceling listen_event for %s", app_name)
    await self.AD.events.cancel_event_callback(app_name, handle)
@utils.sync_wrapper
async def info_listen_event(self, handle):
    """Look up the details of an event callback from its handle.

    Args:
        handle: The handle returned by the original ``listen_event()`` call.

    Returns:
        The values (service, kwargs) supplied when the callback was created.

    Examples:
        >>> service, kwargs = self.info_listen_event(handle)
    """
    app_name = self.name
    self.logger.debug("Calling info_listen_event for %s", app_name)
    return await self.AD.events.info_event_callback(app_name, handle)
@utils.sync_wrapper
async def fire_event(self, event, **kwargs):
    """Fire an event on the AppDaemon bus, for apps and plugins.

    Args:
        event: Event name — either a standard Home Assistant event such as
            `service_registered` or an arbitrary custom event such as
            "MODE_CHANGE".
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace for the call; usually safe to
            ignore. See `namespaces <APPGUIDE.html#namespaces>`__.
        **kwargs (optional): Additional keyword arguments supplied as part
            of the event payload.

    Returns:
        None.

    Examples:
        >>> self.fire_event("MY_CUSTOM_EVENT", jam="true")
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    await self.AD.events.fire_event(namespace, event, **kwargs)
#
# Time
#
def parse_utc_string(self, utc_string):
    """Convert a date/time string into a local POSIX timestamp (float).

    NOTE(review): despite the historical name, this returns a numeric
    timestamp (seconds since the epoch, adjusted by the local UTC offset),
    not a string. The input is split on every non-digit character and the
    trailing fragment is dropped, so sub-second/offset suffixes are ignored.

    Args:
        utc_string (str): A string containing the date and time to convert.

    Returns:
        float: Timestamp equivalent to the date and time in ``utc_string``.
    """
    fields = [int(part) for part in re.split(r'[^\d]', utc_string)[:-1]]
    return datetime.datetime(*fields).timestamp() + self.get_tz_offset() * 60
@staticmethod
def get_tz_offset():
    """Returns the timezone difference between UTC and Local Time, in minutes."""
    # Wall-clock delta between local now() and utcnow(), rounded to whole
    # seconds because the two clock reads happen a moment apart.
    utc_offset_min = int(round(
        (datetime.datetime.now()
        - datetime.datetime.utcnow()).total_seconds())
    ) / 60 # round for taking time twice
    utc_offset_h = utc_offset_min / 60
    # we do not handle 1/2 h timezone offsets
    # NOTE(review): the assert below is a float identity (x / 60 * 60 == x)
    # and does not actually reject half-hour offsets as the comment above
    # suggests — e.g. 330 minutes passes. Left unchanged because "fixing" it
    # would start raising AssertionError in half-hour timezones.
    assert utc_offset_min == utc_offset_h * 60
    return utc_offset_min
@staticmethod
def convert_utc(utc):
    """Gets a `datetime` object for the specified UTC.

    Home Assistant provides timestamps of several different sorts that may be
    used to gain additional insight into state changes. These timestamps are
    in UTC and are coded as `ISO 8601` combined date and time strings. This function
    will accept one of these strings and convert it to a localised Python
    `datetime` object representing the timestamp.

    Args:
        utc: An `ISO 8601` encoded date and time string in the following
            format: `2016-07-13T14:24:02.040658-04:00`

    Returns:
        A localised Python `datetime` object representing the timestamp.

    Raises:
        iso8601.ParseError: presumably, if the string is not valid ISO 8601 —
            TODO confirm against the iso8601 package documentation.
    """
    # Delegates entirely to the third-party `iso8601` parser.
    return iso8601.parse_date(utc)
@utils.sync_wrapper
async def sun_up(self):
    """Determine whether the sun is currently up.

    Returns:
        bool: ``True`` if the sun is up, ``False`` otherwise.

    Examples:
        >>> if self.sun_up():
        >>>    #do something
    """
    is_up = await self.AD.sched.sun_up()
    return is_up
@utils.sync_wrapper
async def sun_down(self):
    """Determine whether the sun is currently down.

    Returns:
        bool: ``True`` if the sun is down, ``False`` otherwise.

    Examples:
        >>> if self.sun_down():
        >>>    #do something
    """
    is_down = await self.AD.sched.sun_down()
    return is_down
@utils.sync_wrapper
async def parse_time(self, time_str, name=None, aware=False):
    """Create a ``datetime.time`` object from its string representation.

    Accepts either a plain time or a sunrise/sunset offset expression and
    converts it to a ``time`` object.

    Args:
        time_str (str): Time in one of the following formats:

            a. ``HH:MM:SS`` - hours, minutes and seconds, 24 hour format.
            b. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise
               or sunset with an optional positive or negative offset.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.
        aware (bool, optional): When ``True`` the created time object is
            timezone-aware.

    Returns:
        A `time` object representing the time given in ``time_str``.

    Examples:
        >>> self.parse_time("17:30:00")
        17:30:00

        >>> time = self.parse_time("sunrise")
        04:33:17

        >>> time = self.parse_time("sunset + 00:30:00")
        19:18:48

        >>> time = self.parse_time("sunrise + 01:00:00")
        05:33:17
    """
    parsed = await self.AD.sched.parse_time(time_str, name, aware)
    return parsed
@utils.sync_wrapper
async def parse_datetime(self, time_str, name=None, aware=False):
    """Create a `datetime` object from its string representation.

    Accepts a date-and-time string, a bare time, or a sunrise/sunset offset
    expression and converts it to a `datetime` object.

    Args:
        time_str (str): A string in one of the following formats:

            a. ``YY-MM-DD-HH:MM:SS`` - full date and time, 24 hour format.
            b. ``HH:MM:SS`` - time only, 24 hour format (the result carries
               today's date).
            c. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise
               or sunset with an optional positive or negative offset.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.
        aware (bool, optional): When ``True`` the created datetime object is
            timezone-aware.

    Returns:
        A `datetime` object for the time and date given in ``time_str``.

    Examples:
        >>> self.parse_datetime("2018-08-09 17:30:00")
        2018-08-09 17:30:00

        >>> self.parse_datetime("17:30:00")
        2019-08-15 17:30:00

        >>> self.parse_datetime("sunrise")
        2019-08-16 05:33:17

        >>> self.parse_datetime("sunset + 00:30:00")
        2019-08-16 19:18:48

        >>> self.parse_datetime("sunrise + 01:00:00")
        2019-08-16 06:33:17
    """
    parsed = await self.AD.sched.parse_datetime(time_str, name, aware)
    return parsed
@utils.sync_wrapper
async def get_now(self):
    """Return the current local date and time.

    Examples:
        >>> self.get_now()
        2019-08-16 21:17:41.098813+00:00
    """
    current = await self.AD.sched.get_now()
    return current
@utils.sync_wrapper
async def get_now_ts(self):
    """Return the current local timestamp.

    Examples:
        >>> self.get_now_ts()
        1565990318.728324
    """
    current_ts = await self.AD.sched.get_now_ts()
    return current_ts
@utils.sync_wrapper
async def now_is_between(self, start_time, end_time, name=None):
    """Determine whether the current time falls between two given times.

    Takes two string representations of a ``time`` (or ``sunrise``/``sunset``
    offsets) and returns ``True`` when the current time is between them.
    Spans that cross midnight are handled correctly.

    Args:
        start_time (str): String representation of the start time.
        end_time (str): String representation of the end time.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.

    Returns:
        bool: ``True`` if the current time is within the given window,
        ``False`` otherwise.

    Notes:
        ``start_time`` and ``end_time`` follow one of these formats:

            a. ``HH:MM:SS`` - hours, minutes and seconds, 24 hour format.
            b. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise
               or sunset with an optional positive or negative offset.

    Examples:
        >>> if self.now_is_between("17:30:00", "08:00:00"):
        >>>     #do something

        >>> if self.now_is_between("sunset - 00:45:00", "sunrise + 00:45:00"):
        >>>     #do something
    """
    in_window = await self.AD.sched.now_is_between(start_time, end_time, name)
    return in_window
@utils.sync_wrapper
async def sunrise(self, aware=False):
    """Return a `datetime` object for the next sunrise.

    Args:
        aware (bool, optional): When ``True`` the returned datetime object
            is timezone-aware.

    Examples:
        >>> self.sunrise()
        2019-08-16 05:33:17
    """
    next_sunrise = await self.AD.sched.sunrise(aware)
    return next_sunrise
@utils.sync_wrapper
async def sunset(self, aware=False):
    """Return a `datetime` object for the next sunset.

    Args:
        aware (bool, optional): When ``True`` the returned datetime object
            is timezone-aware.

    Examples:
        >>> self.sunset()
        2019-08-16 19:48:48
    """
    next_sunset = await self.AD.sched.sunset(aware)
    return next_sunset
@utils.sync_wrapper
async def time(self):
    """Return a localised `time` object for the current local time.

    Prefer this over the standard Python clock functions, especially when
    using the "Time Travel" feature for testing.

    Examples:
        >>> self.time()
        20:15:31.295751
    """
    current = await self.AD.sched.get_now()
    localised = current.astimezone(self.AD.tz)
    return localised.time()
@utils.sync_wrapper
async def datetime(self, aware=False):
    """Return a `datetime` object for the current local date and time.

    Prefer this over the standard Python clock functions, especially when
    using the "Time Travel" feature for testing.

    Args:
        aware (bool, optional): When ``True`` the returned datetime object
            is timezone-aware.

    Examples:
        >>> self.datetime()
        2019-08-15 20:15:55.549379
    """
    # Guard clause: anything other than the literal True yields a naive value,
    # matching the original `aware is True` comparison exactly.
    if aware is not True:
        return await self.AD.sched.get_now_naive()
    current = await self.AD.sched.get_now()
    return current.astimezone(self.AD.tz)
@utils.sync_wrapper
async def date(self):
    """Return a localised `date` object for the current local date.

    Prefer this over the standard Python clock functions, especially when
    using the "Time Travel" feature for testing.

    Examples:
        >>> self.date()
        2019-08-15
    """
    current = await self.AD.sched.get_now()
    localised = current.astimezone(self.AD.tz)
    return localised.date()
def get_timezone(self):
    """Return the currently configured time zone."""
    configured_tz = self.AD.time_zone
    return configured_tz
#
# Scheduler
#
@utils.sync_wrapper
async def cancel_timer(self, handle):
    """Cancel a previously created timer.

    Args:
        handle: Handle value returned by the original scheduler call.

    Returns:
        None.

    Examples:
        >>> self.cancel_timer(handle)
    """
    app_name = self.name
    self.logger.debug("Canceling timer with handle %s for %s", handle, app_name)
    await self.AD.sched.cancel_timer(app_name, handle)
@utils.sync_wrapper
async def info_timer(self, handle):
    """Look up the details of a scheduler event from its handle.

    Args:
        handle: The handle returned when the scheduler call was made.

    Returns:
        `time` - datetime object representing the next time the callback
        will be fired.

        `interval` - repeat interval if applicable, `0` otherwise.

        `kwargs` - the values supplied when the callback was created.

        or ``None`` - if the handle is invalid or the timer no longer exists.

    Examples:
        >>> time, interval, kwargs = self.info_timer(handle)
    """
    info = await self.AD.sched.info_timer(handle, self.name)
    return info
@utils.sync_wrapper
async def run_in(self, callback, delay, **kwargs):
    """Run the callback after a given number of seconds.

    Useful for adding a delay — e.g. turning a light off 60 seconds after a
    motion detector triggered it. Always use this instead of
    ``time.sleep()``.

    Args:
        callback: Function invoked when the timer fires. Must conform to the
            standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        delay (int): Delay in seconds before the callback is invoked.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, the callback will be pinned to a
            particular thread.
        pin_thread (int, optional): Which worker-pool thread runs the
            callback (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters passed to the callback when
            it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must always be numerically lower than
        ``random_end``; both may be negative (random offset before an event)
        or positive (random offset after an event).

    Examples:
        Run the specified callback after 10 seconds.

        >>> self.handle = self.run_in(self.run_in_c, 10)

        Run the specified callback after 10 seconds with a keyword arg (title).

        >>> self.handle = self.run_in(self.run_in_c, 5, title = "run_in5")
    """
    app_name = self.name
    self.logger.debug("Registering run_in in %s seconds for %s", delay, app_name)
    # `delay` often comes straight from the config file as a string, so
    # coerce it to int before doing datetime arithmetic.
    fire_at = await self.get_now() + timedelta(seconds=int(delay))
    return await self.AD.sched.insert_schedule(
        app_name, fire_at, callback, False, None, **kwargs
    )
@utils.sync_wrapper
async def run_once(self, callback, start, **kwargs):
    """Runs the callback once, at the specified time of day.

    Args:
        callback: Function to be invoked at the specified time of day.
            It must conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: Should be either a Python ``time`` object or a ``parse_time()``
            formatted string that specifies when the callback will occur. If
            the time specified is in the past, the callback will occur the
            ``next day`` at the specified time.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, the callback will be pinned to a
            particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is neither a ``datetime.time`` nor a string.

    Notes:
        The ``random_start`` value must always be numerically lower than
        ``random_end`` value, they can be negative to denote a random offset
        before and event, or positive to denote a random offset after an event.

    Examples:
        Run at 4pm today, or 4pm tomorrow if it is already after 4pm.

        >>> runtime = datetime.time(16, 0, 0)
        >>> handle = self.run_once(self.run_once_c, runtime)

        Run today at 10:30 using the `parse_time()` function.

        >>> handle = self.run_once(self.run_once_c, "10:30:00")

        Run at sunset.

        >>> handle = self.run_once(self.run_once_c, "sunset")

        Run an hour after sunrise.

        >>> handle = self.run_once(self.run_once_c, "sunrise + 01:00:00")
    """
    # isinstance() instead of the previous `type(x) == T` comparison so that
    # subclasses of datetime.time / str are accepted as well.
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"].time()
    else:
        raise ValueError("Invalid type for start")
    name = self.name

    self.logger.debug("Registering run_once at %s for %s", when, name)

    now = await self.get_now()
    today = now.date()
    event = datetime.datetime.combine(today, when)
    aware_event = self.AD.sched.convert_naive(event)
    # A time already in the past today rolls over to the same time tomorrow.
    if aware_event < now:
        aware_event = aware_event + datetime.timedelta(days=1)

    handle = await self.AD.sched.insert_schedule(
        name, aware_event, callback, False, None, **kwargs)

    return handle
@utils.sync_wrapper
async def run_at(self, callback, start, **kwargs):
    """Runs the callback once, at the specified time of day.

    Args:
        callback: Function to be invoked at the specified time of day.
            It must conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: Should be either a Python ``time`` object or a ``parse_time()`` formatted
            string that specifies when the callback will occur.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` has an unsupported type, or is in the past.

    Notes:
        The ``random_start`` value must always be numerically lower than ``random_end`` value,
        they can be negative to denote a random offset before and event, or positive to
        denote a random offset after an event.
        The ``run_at()`` function will ``raise`` an exception if the specified time is in the ``past``.

    Examples:
        Run at 4pm today.

        >>> runtime = datetime.time(16, 0, 0)
        >>> today = datetime.date.today()
        >>> event = datetime.datetime.combine(today, runtime)
        >>> handle = self.run_at(self.run_at_c, event)

        Run today at 10:30 using the `parse_time()` function.

        >>> handle = self.run_at(self.run_at_c, "10:30:00")

        Run on a specific date and time.

        >>> handle = self.run_at(self.run_at_c, "2018-12-11 10:30:00")

        Run at the next sunset.

        >>> handle = self.run_at(self.run_at_c, "sunset")

        Run an hour after the next sunrise.

        >>> handle = self.run_at(self.run_at_c, "sunrise + 01:00:00")
    """
    # isinstance() instead of type() ==: idiomatic, and accepts
    # datetime/str subclasses (backward compatible).
    if isinstance(start, datetime.datetime):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"]
    else:
        raise ValueError("Invalid type for start")
    aware_when = self.AD.sched.convert_naive(when)
    name = self.name

    self.logger.debug("Registering run_at at %s for %s", when, name)

    now = await self.get_now()
    # Unlike run_once(), a past time is an error here rather than being
    # deferred to the next day.
    if aware_when < now:
        raise ValueError(
            "{}: run_at() Start time must be "
            "in the future".format(self.name)
        )
    handle = await self.AD.sched.insert_schedule(
        name, aware_when, callback, False, None, **kwargs)
    return handle
@utils.sync_wrapper
async def run_daily(self, callback, start, **kwargs):
    """Runs the callback at the same time every day.

    Args:
        callback: Function to be invoked every day at the specified time.
            It must conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: Should be either a Python ``time`` object or a ``parse_time()`` formatted
            string that specifies when the callback will occur. If the time
            specified is in the past, the callback will occur the ``next day`` at
            the specified time.
            When specifying sunrise or sunset relative times using the ``parse_datetime()``
            format, the time of the callback will be adjusted every day to track the actual
            value of sunrise or sunset.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is neither a ``datetime.time`` nor a string.

    Notes:
        The ``random_start`` value must always be numerically lower than ``random_end`` value,
        they can be negative to denote a random offset before and event, or positive to
        denote a random offset after an event.

    Examples:
        Run daily at 7pm.

        >>> runtime = datetime.time(19, 0, 0)
        >>> self.run_daily(self.run_daily_c, runtime)

        Run at 10:30 every day using the `parse_time()` function.

        >>> handle = self.run_daily(self.run_daily_c, "10:30:00")

        Run every day at sunrise.

        >>> handle = self.run_daily(self.run_daily_c, "sunrise")

        Run every day an hour after sunset.

        >>> handle = self.run_daily(self.run_daily_c, "sunset + 01:00:00")
    """
    info = None
    when = None
    # isinstance() instead of type() ==: idiomatic, and accepts
    # time/str subclasses (backward compatible).
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        info = await self.AD.sched._parse_time(start, self.name)
    else:
        raise ValueError("Invalid type for start")

    if info is None or info["sun"] is None:
        # Plain time of day: delegate to run_every() with a 24h interval,
        # starting today or tomorrow depending on whether the time passed.
        if when is None:
            when = info["datetime"].time()
        aware_now = await self.get_now()
        now = self.AD.sched.make_naive(aware_now)
        today = now.date()
        event = datetime.datetime.combine(today, when)
        if event < now:
            event = event + datetime.timedelta(days=1)
        handle = await self.run_every(callback, event, 24 * 60 * 60, **kwargs)
    elif info["sun"] == "sunrise":
        # Sun-relative times re-anchor to the actual sunrise/sunset each day.
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunrise(callback, **kwargs)
    else:
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunset(callback, **kwargs)
    return handle
@utils.sync_wrapper
async def run_hourly(self, callback, start, **kwargs):
    """Runs the callback at the same time every hour.

    Args:
        callback: Function to be invoked every hour at the specified time.
            It must conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object that specifies when the callback will occur,
            the hour component of the time object is ignored. If the time specified
            is in the past, the callback will occur the ``next hour`` at the specified
            time. If time is not supplied, the callback will start an hour from the
            time that ``run_hourly()`` was executed.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than ``random_end`` value,
        they can be negative to denote a random offset before and event, or positive to
        denote a random offset after an event.

    Examples:
        Run every hour, on the hour.

        >>> runtime = datetime.time(0, 0, 0)
        >>> self.run_hourly(self.run_hourly_c, runtime)
    """
    current = await self.get_now()
    one_hour = datetime.timedelta(hours=1)
    if start is None:
        # No anchor given: first run is simply one hour from now.
        first_run = current + one_hour
    else:
        # Anchor to the requested minute/second, pushed into the next
        # hour if that moment has already passed.
        first_run = current.replace(minute=start.minute, second=start.second)
        if first_run < current:
            first_run += one_hour
    return await self.run_every(callback, first_run, 60 * 60, **kwargs)
@utils.sync_wrapper
async def run_minutely(self, callback, start, **kwargs):
    """Runs the callback at the same time every minute.

    Args:
        callback: Function to be invoked every minute.
            It must conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object that specifies when the callback will occur,
            the hour and minute components of the time object are ignored. If the
            time specified is in the past, the callback will occur the ``next minute`` at
            the specified time. If time is not supplied, the callback will start a
            minute from the time that ``run_minutely()`` was executed.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than ``random_end`` value,
        they can be negative to denote a random offset before and event, or positive to
        denote a random offset after an event.

    Examples:
        Run every minute on the minute.

        >>> time = datetime.time(0, 0, 0)
        >>> self.run_minutely(self.run_minutely_c, time)
    """
    current = await self.get_now()
    one_minute = datetime.timedelta(minutes=1)
    if start is None:
        # No anchor given: first run is simply one minute from now.
        first_run = current + one_minute
    else:
        # Anchor to the requested second, pushed into the next minute if
        # that moment has already passed.
        first_run = current.replace(second=start.second)
        if first_run < current:
            first_run += one_minute
    return await self.run_every(callback, first_run, 60, **kwargs)
@utils.sync_wrapper
async def run_every(self, callback, start, interval, **kwargs):
    """Runs the callback with a configurable delay starting at a specific time.

    Args:
        callback: Function to be invoked when the time interval is reached.
            It must conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``datetime`` object that specifies when the initial callback
            will occur.
        interval: Frequency (expressed in seconds) in which the callback should be executed.
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is in the past.

    Notes:
        The ``random_start`` value must always be numerically lower than ``random_end`` value,
        they can be negative to denote a random offset before and event, or positive to
        denote a random offset after an event.

    Examples:
        Run every 17 minutes starting in 2 hours time.

        >>> self.run_every(self.run_every_c, time, 17 * 60)
    """
    app_name = self.name
    aware_start = self.AD.sched.convert_naive(start)
    current = await self.get_now()
    # Repeating schedules must begin in the future.
    if aware_start < current:
        raise ValueError("start cannot be in the past")

    self.logger.debug(
        "Registering run_every starting %s in %ss intervals for %s",
        aware_start, interval, app_name)

    return await self.AD.sched.insert_schedule(
        app_name, aware_start, callback, True,
        None, interval=interval, **kwargs)
@utils.sync_wrapper
async def _schedule_sun(self, name, type_, callback, **kwargs):
    """Register a repeating callback anchored to the next sun event.

    ``type_`` is either ``"next_rising"`` (sunrise) or ``"next_setting"``
    (sunset); the scheduler re-anchors the event each day.
    """
    anchor = (
        self.AD.sched.next_sunrise()
        if type_ == "next_rising"
        else self.AD.sched.next_setting()
    ) if False else None  # placeholder removed below
@utils.sync_wrapper
async def run_at_sunset(self, callback, **kwargs):
    """Runs a callback every day at or around sunset.

    Args:
        callback: Function to be invoked at or around sunset. It must conform to the
            standard Scheduler Callback format documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Keyword Args:
        offset (int, optional): The time in seconds that the callback should be delayed after
            sunset. A negative value will result in the callback occurring before sunset.
            This parameter cannot be combined with ``random_start`` or ``random_end``.
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than ``random_end`` value,
        they can be negative to denote a random offset before and event, or positive to
        denote a random offset after an event.

    Examples:
        Example using timedelta.

        >>> self.run_at_sunset(self.sun, offset = datetime.timedelta(minutes = -45).total_seconds())

        Or you can just do the math yourself.

        >>> self.run_at_sunset(self.sun, offset = 30 * 60)

        Run at a random time +/- 60 minutes from sunset.

        >>> self.run_at_sunset(self.sun, random_start = -60*60, random_end = 60*60)

        Run at a random time between 30 and 60 minutes before sunset.

        >>> self.run_at_sunset(self.sun, random_start = -60*60, random_end = 30*60)
    """
    name = self.name
    self.logger.debug("Registering run_at_sunset with kwargs = %s for %s", kwargs, name)
    # "next_setting" makes the scheduler track each day's actual sunset time.
    handle = await self._schedule_sun(name, "next_setting", callback, **kwargs)
    return handle
@utils.sync_wrapper
async def run_at_sunrise(self, callback, **kwargs):
    """Runs a callback every day at or around sunrise.

    Args:
        callback: Function to be invoked at or around sunrise. It must conform to the
            standard Scheduler Callback format documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Keyword Args:
        offset (int, optional): The time in seconds that the callback should be delayed after
            sunrise. A negative value will result in the callback occurring before sunrise.
            This parameter cannot be combined with ``random_start`` or ``random_end``.
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        The ``random_start`` value must always be numerically lower than ``random_end`` value,
        they can be negative to denote a random offset before and event, or positive to
        denote a random offset after an event.

    Examples:
        Run 45 minutes before sunrise.

        >>> self.run_at_sunrise(self.sun, offset = datetime.timedelta(minutes = -45).total_seconds())

        Or you can just do the math yourself.

        >>> self.run_at_sunrise(self.sun, offset = 30 * 60)

        Run at a random time +/- 60 minutes from sunrise.

        >>> self.run_at_sunrise(self.sun, random_start = -60*60, random_end = 60*60)

        Run at a random time between 30 and 60 minutes before sunrise.

        >>> self.run_at_sunrise(self.sun, random_start = -60*60, random_end = 30*60)
    """
    name = self.name
    self.logger.debug("Registering run_at_sunrise with kwargs = %s for %s", kwargs, name)
    # "next_rising" makes the scheduler track each day's actual sunrise time.
    handle = await self._schedule_sun(name, "next_rising", callback, **kwargs)
    return handle
#
# Dashboard
#
def dash_navigate(self, target, timeout=-1, ret=None, sticky=0):
    """Forces all connected Dashboards to navigate to a new URL.

    Args:
        target (str): Name of the new Dashboard to navigate to (e.g., ``/SensorPanel``).
            Note that this value is not a URL.
        timeout (int): Length of time to stay on the new dashboard before returning
            to the original. This argument is optional and if not specified, the
            navigation will be permanent. Note that if there is a click or touch on
            the new panel before the timeout expires, the timeout will be cancelled.
        ret (str): Dashboard to return to after the timeout has elapsed.
        sticky (int): Specifies whether or not to return to the original dashboard
            after it has been clicked on. The default behavior (``sticky=0``) is to remain
            on the new dashboard if clicked, or return to the original otherwise.
            By using a different value (sticky= 5), clicking the dashboard will extend
            the amount of time (in seconds), but it will return to the original dashboard
            after a period of inactivity equal to timeout.

    Returns:
        None.

    Examples:
        Switch to AlarmStatus Panel then return to current panel after 10 seconds.

        >>> self.dash_navigate("/AlarmStatus", timeout=10)

        Switch to Locks Panel then return to Main panel after 10 seconds.

        >>> self.dash_navigate("/Locks", timeout=10, ret="/SensorPanel")
    """
    payload = dict(command="navigate", target=target, sticky=sticky)
    # Optional keys are only included when the caller actually set them.
    if timeout != -1:
        payload.update(timeout=timeout)
    if ret is not None:
        payload.update({"return": ret})
    self.fire_event("__HADASHBOARD_EVENT", **payload)
#
# Async
#
async def run_in_executor(self, func, *args, **kwargs):
    """Run a blocking (synchronous) callable in AppDaemon's executor pool.

    Thin delegate to ``utils.run_in_executor()`` so coroutine code can call
    blocking functions without stalling the event loop.

    Args:
        func: The synchronous callable to execute.
        *args: Positional arguments passed through to ``func``.
        **kwargs: Keyword arguments passed through to ``func``.

    Returns:
        Whatever ``func`` returns.
    """
    return await utils.run_in_executor(self, func, *args, **kwargs)
@utils.sync_wrapper
async def create_task(self, coro, callback=None, **kwargs):
    """Schedules a Coroutine to be executed.

    Args:
        coro: The coroutine object (`not coroutine function`) to be executed.
        callback: The non-async callback to be executed when complete.
        **kwargs (optional): Any additional keyword arguments to send the callback.

    Returns:
        A Future, which can be cancelled by calling f.cancel().

    Examples:
        >>> f = self.create_task(asyncio.sleep(3), callback=self.coro_callback)
        >>>
        >>> def coro_callback(self, kwargs):
    """
    # get stuff we'll need to fake scheduler call
    # The completion callback is dispatched through the scheduler worker
    # machinery, so build a synthetic scheduler-callback descriptor for it.
    sched_data = {
        "id": uuid.uuid4().hex,
        "name": self.name,
        "objectid": self.AD.app_management.objects[self.name]["id"],
        "type": "scheduler",
        "function": callback,
        "pin_app": await self.get_app_pin(),
        "pin_thread": await self.get_pin_thread(),
    }

    def callback_inner(f):
        # Runs when the wrapped future completes; hands the result to the
        # app's callback via dispatch_worker.
        try:
            # TODO: use our own callback type instead of borrowing
            # from scheduler
            # NOTE(review): this mutates the caller-supplied kwargs dict
            # in place by adding "result" — confirm no caller reuses it.
            kwargs["result"] = f.result()
            sched_data["kwargs"] = kwargs
            self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))
            # callback(f.result(), kwargs)
        except asyncio.CancelledError:
            # A cancelled task simply never reports a result.
            pass

    f = asyncio.ensure_future(coro)
    if callback is not None:
        self.logger.debug("Adding add_done_callback for coro %s for %s", f, self.name)
        f.add_done_callback(callback_inner)

    # Track the future so AppDaemon can cancel it on app termination.
    self.AD.futures.add_future(self.name, f)
    return f
@staticmethod
async def sleep(delay, result=None):
    """Pause execution for a certain time span
    (not available in sync apps)

    Args:
        delay (int): Number of seconds to pause.
        result (optional): Result to return upon delay completion.

    Returns:
        Result or `None`.

    Raises:
        RuntimeError: If called outside a running event loop (i.e. from a
            sync app).

    Notes:
        This function is not available in sync apps.

    Examples:
        >>> async def myfunction(self):
        >>>     await self.sleep(5)
    """
    # asyncio.get_running_loop() is the documented way to detect a running
    # event loop. The previous asyncio.get_event_loop() probe could silently
    # create (or return) a loop in the main thread even outside async
    # context, defeating the guard, and that usage is deprecated.
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        raise RuntimeError("The sleep method is for use in ASYNC methods only") from None
    return await asyncio.sleep(delay, result=result)
#
# Other
#
def run_in_thread(self, callback, thread, **kwargs):
    """Schedules a callback to be run in a different thread from the current one.

    Implemented as an immediate ``run_in()`` with ``pin=False`` and an
    explicit ``pin_thread``, which forces the dispatch onto the requested
    worker thread.

    Args:
        callback: Function to be run on the new thread.
        thread (int): Thread number (0 - number of threads).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        None.

    Examples:
        >>> self.run_in_thread(my_callback, 8)
    """
    self.run_in(callback, 0, pin=False, pin_thread=thread, **kwargs)
@utils.sync_wrapper
async def get_thread_info(self):
    """Gets information on AppDaemon worker threads.

    Thin delegate to the threading subsystem.

    Returns:
        A dictionary containing all the information for AppDaemon worker threads.

    Examples:
        >>> thread_info = self.get_thread_info()
    """
    return await self.AD.threading.get_thread_info()
@utils.sync_wrapper
async def get_scheduler_entries(self):
    """Gets information on AppDaemon scheduler entries.

    Thin delegate to the scheduler subsystem.

    Returns:
        A dictionary containing all the information for entries in the AppDaemon scheduler.

    Examples:
        >>> schedule = self.get_scheduler_entries()
    """
    return await self.AD.sched.get_scheduler_entries()
@utils.sync_wrapper
async def get_callback_entries(self):
    """Gets information on AppDaemon callback entries.

    Thin delegate to the callbacks subsystem.

    Returns:
        A dictionary containing all the information for entries in the AppDaemon state,
        and event callback table.

    Examples:
        >>> callbacks = self.get_callback_entries()
    """
    return await self.AD.callbacks.get_callback_entries()
@utils.sync_wrapper
async def depends_on_module(self, *modules):
    """Registers a global_modules dependency for an app.

    Args:
        *modules: Modules to register a dependency on.

    Returns:
        None.

    Examples:
        >>> import somemodule
        >>> import anothermodule
        >>> # later
        >>> self.depends_on_module(somemodule, anothermodule)
    """
    return await self.AD.app_management.register_module_dependency(
        self.name,
        *modules
    )
<|code_end|>
appdaemon/events.py
<|code_start|>"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
class Events:
    """Encapsulate event handling."""

    def __init__(self, ad: AppDaemon):
        """Constructor.

        Args:
            ad: Reference to the AppDaemon object
        """
        self.AD = ad
        self.logger = ad.logging.get_child("_events")

    #
    # Events
    #

    async def add_event_callback(self, __name, namespace, cb, event, **kwargs):
        """Adds a callback for an event which is called internally by apps.

        Args:
            __name (str): Name of the app.
            namespace (str): Namespace of the event.
            cb: Callback function.
            event (str): Name of the event.
            **kwargs: List of values to filter on, and additional arguments to pass to the callback.

        Returns:
            ``None`` or the reference to the callback handle.
        """
        if self.AD.threading.validate_pin(__name, kwargs) is True:
            # NOTE(review): this tests for "pin" but reads "pin_app" — if a
            # caller can pass "pin" without "pin_app" this raises KeyError;
            # verify against all callers.
            if "pin" in kwargs:
                pin_app = kwargs["pin_app"]
            else:
                pin_app = self.AD.app_management.objects[__name]["pin_app"]

            # An explicit pin_thread implies pinning.
            if "pin_thread" in kwargs:
                pin_thread = kwargs["pin_thread"]
                pin_app = True
            else:
                pin_thread = self.AD.app_management.objects[__name]["pin_thread"]

            if __name not in self.AD.callbacks.callbacks:
                self.AD.callbacks.callbacks[__name] = {}

            handle = uuid.uuid4().hex
            self.AD.callbacks.callbacks[__name][handle] = {
                "name": __name,
                "id": self.AD.app_management.objects[__name]["id"],
                "type": "event",
                "function": cb,
                "namespace": namespace,
                "event": event,
                "pin_app": pin_app,
                "pin_thread": pin_thread,
                "kwargs": kwargs
            }

            # Optional auto-expiry: schedule a one-shot that cancels this
            # callback after "timeout" seconds.
            if "timeout" in kwargs:
                exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
                kwargs["__timeout"] = await self.AD.sched.insert_schedule(
                    __name, exec_time, None, False, None, __event_handle=handle,
                )

            # Mirror the registration as an admin entity for introspection.
            await self.AD.state.add_entity("admin", "event_callback.{}".format(handle), "active",
                                           {"app": __name, "event_name": event, "function": cb.__name__,
                                            "pinned": pin_app, "pinned_thread": pin_thread, "fired": 0,
                                            "executed": 0, "kwargs": kwargs})
            return handle
        else:
            return None

    async def cancel_event_callback(self, name, handle):
        """Cancels an event callback.

        Args:
            name (str): Name of the app or module.
            handle: Previously supplied callback handle for the callback.

        Returns:
            None.
        """
        if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
            del self.AD.callbacks.callbacks[name][handle]
            await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))

        # Drop the app's (now empty) callback bucket entirely.
        if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
            del self.AD.callbacks.callbacks[name]

    async def info_event_callback(self, name, handle):
        """Gets the information of an event callback.

        Args:
            name (str): Name of the app or subsystem.
            handle: Previously supplied handle for the callback.

        Returns:
            A dictionary of callback entries or raise a ``ValueError`` if an invalid handle is provided.
        """
        if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
            callback = self.AD.callbacks.callbacks[name][handle]
            return callback["event"], callback["kwargs"].copy()
        else:
            raise ValueError("Invalid handle: {}".format(handle))

    async def fire_event(self, namespace, event, **kwargs):
        """Fires an event.

        If the namespace does not have a plugin associated with it, the event will be fired locally.
        If a plugin is associated, the firing of the event will be delegated to the plugin, under the
        understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
        usually via the system the plugin is communicating with.

        Args:
            namespace (str): Namespace for the event to be fired in.
            event (str): Name of the event.
            **kwargs: Arguments to associate with the event.

        Returns:
            None.
        """
        self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
        plugin = await self.AD.plugins.get_plugin_object(namespace)

        if hasattr(plugin, "fire_plugin_event"):
            # We assume that the event will come back to us via the plugin
            await plugin.fire_plugin_event(event, namespace, **kwargs)
        else:
            # Just fire the event locally
            await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})

    async def process_event(self, namespace, data):
        """Processes an event that has been received either locally or from a plugin.

        Args:
            namespace (str): Namespace the event was fired in.
            data: Data associated with the event.

        Returns:
            None.
        """
        try:
            # if data["event_type"] == "__AD_ENTITY_REMOVED":
            #    print("process event")

            self.logger.debug("Event type:%s:", data['event_type'])
            self.logger.debug(data["data"])

            # Kick the scheduler so it updates its clock for time travel
            if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
                await self.AD.sched.kick()

            if data['event_type'] == "state_changed":
                if 'entity_id' in data['data'] and 'new_state' in data['data']:
                    entity_id = data['data']['entity_id']

                    self.AD.state.set_state_simple(namespace, entity_id, data['data']['new_state'])

                    if self.AD.apps is True and namespace != "admin":
                        await self.AD.state.process_state_callbacks(namespace, data)
                else:
                    self.logger.warning("Malformed 'state_changed' event: %s", data['data'])
                    return

            if self.AD.apps is True:  # and namespace != "admin":
                # Process callbacks
                await self.process_event_callbacks(namespace, data)

            #
            # Send to the stream
            #

            if self.AD.http is not None:
                if data["event_type"] == "state_changed":
                    if data["data"]["new_state"] == data["data"]["old_state"]:
                        # Nothing changed so don't send
                        return

                # take a copy without TS if present as it breaks deepcopy and json
                if "ts" in data["data"]:
                    ts = data["data"].pop("ts")
                    mydata = deepcopy(data)
                    data["data"]["ts"] = ts
                else:
                    mydata = deepcopy(data)

                await self.AD.http.stream_update(namespace, mydata)

        # NOTE(review): bare except also swallows CancelledError/SystemExit —
        # consider narrowing to "except Exception".
        except:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error during process_event()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)

    def has_log_callback(self, name):
        """Returns ``True`` if the app has a log callback, ``False`` otherwise.

        Used to prevent callback loops. In the calling logic, if this function returns
        ``True`` the resulting logging event will be suppressed.

        Args:
            name (str): Name of the app.
        """
        has_log_callback = False
        if name == "AppDaemon._stream":
            has_log_callback = True
        else:
            # NOTE(review): the loop variable "uuid" shadows the imported
            # uuid module within this method (harmless here, but confusing).
            for callback in self.AD.callbacks.callbacks:
                for uuid in self.AD.callbacks.callbacks[callback]:
                    cb = self.AD.callbacks.callbacks[callback][uuid]
                    if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
                        has_log_callback = True

        return has_log_callback

    async def process_event_callbacks(self, namespace, data):
        """Processes a pure event callback.

        Locate any callbacks that may be registered for this event, check for filters and if appropriate,
        dispatch the event for further checking and eventual action.

        Args:
            namespace (str): Namespace of the event.
            data: Data associated with the event.

        Returns:
            None.
        """
        self.logger.debug("process_event_callbacks() %s %s", namespace, data)

        # Check for log callbacks and exit to prevent loops
        if data["event_type"] == "__AD_LOG_EVENT":
            if self.has_log_callback(data["data"]["app_name"]):
                self.logger.debug("Discarding event for loop avoidance")
                return

        removes = []
        for name in self.AD.callbacks.callbacks.keys():
            for uuid_ in self.AD.callbacks.callbacks[name]:
                callback = self.AD.callbacks.callbacks[name][uuid_]
                if callback["namespace"] == namespace or callback[
                        "namespace"] == "global" or namespace == "global":
                    #
                    # Check for either a blank event (for all events)
                    # Or the event is a match
                    # But don't allow a global listen for any system events (events that start with __)
                    #
                    if "event" in callback and (
                            (callback["event"] is None and data['event_type'][:2] != "__")
                            or data['event_type'] == callback["event"]):

                        # Check any filters
                        # Every key the listener registered must match the
                        # event payload for the callback to fire.
                        _run = True
                        for key in callback["kwargs"]:
                            if key in data["data"] and callback["kwargs"][key] != \
                                    data["data"][key]:
                                _run = False

                        if data["event_type"] == "__AD_LOG_EVENT":
                            if "log" in callback["kwargs"] and callback["kwargs"]["log"] != data["data"]["log_type"]:
                                _run = False

                        if _run:
                            if name in self.AD.app_management.objects:
                                executed = await self.AD.threading.dispatch_worker(name,
                                                                                   {
                                                                                       "id": uuid_,
                                                                                       "name": name,
                                                                                       "objectid": self.AD.app_management.objects[name]["id"],
                                                                                       "type": "event",
                                                                                       "event": data['event_type'],
                                                                                       "function": callback["function"],
                                                                                       "data": data["data"],
                                                                                       "pin_app": callback["pin_app"],
                                                                                       "pin_thread": callback["pin_thread"],
                                                                                       "kwargs": callback["kwargs"]
                                                                                   })

                                # Remove the callback if appropriate
                                if executed is True:
                                    remove = callback["kwargs"].get("oneshot", False)
                                    if remove is True:
                                        removes.append({"name": name, "uuid": uuid_})

        # Deferred removal so we don't mutate the dicts while iterating them.
        for remove in removes:
            await self.cancel_event_callback(remove["name"], remove["uuid"])

    async def event_services(self, namespace, domain, service, kwargs):
        # Service-call shim: "event" names the event to fire; everything
        # else in kwargs becomes the event payload.
        if "event" in kwargs:
            event = kwargs["event"]
            del kwargs["event"]
            await self.fire_event(namespace, event, **kwargs)
        else:
            self.logger.warning("Malformed 'fire_event' service call, as no event given")
<|code_end|>
|
turn_on service not working with async apps
## What did I do
I bumped into an [issue](https://community.home-assistant.io/t/need-help-with-appdaemon-4-and-async/163937) on the community forum that turned out to be the same problem I had recently. Consider the following simple application:
```python
import appdaemon.plugins.hass.hassapi as hass
class Simple(hass.Hass):
async def toggle(self, light):
super().toggle(light)
def initialize(self):
self.create_task(self.toggle("light.0xccccccfffe995875_light"))
```
## What happened
The following error appears 10 seconds after the app is initialized
```
2020-01-16 19:27:59.061352 WARNING test5: ------------------------------------------------------------
2020-01-16 19:27:59.062493 WARNING test5: Unexpected error running initialize() for test5
2020-01-16 19:27:59.063452 WARNING test5: ------------------------------------------------------------
2020-01-16 19:27:59.064921 WARNING test5: Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/appdaemon/app_management.py", line 145, in initialize_app
await utils.run_in_executor(self, init)
File "/usr/lib/python3.8/site-packages/appdaemon/utils.py", line 276, in run_in_executor
response = future.result()
asyncio.exceptions.TimeoutError
2020-01-16 19:27:59.065899 WARNING test5: ------------------------------------------------------------
2020-01-16 19:27:59.072580 WARNING AppDaemon: Excessive time spent in utility loop: 10089.0ms, 10088.0ms in check_app_updates(), 1.0ms in other
```
However, if I remove the `async` keyword, it works. Also, if I change `super().toggle(light)` to `self.call_service("homeassistant/toggle", entity_id=light)` while keeping the `async`, it works.
So the problem is that `self.toggle` does not work with async features. It also does not work with `self.turn_on` and `self.turn_off`.
## What do I expect
I'd expect self.toggle to work with async functions.
| appdaemon/__main__.py
<|code_start|>#!/usr/bin/python3
"""AppDaemon main() module.
AppDaemon module that contains main() along with argument parsing, instantiation of the AppDaemon and HTTP Objects,
also creates the loop and kicks everything off
"""
import sys
import argparse
import os
import os.path
import signal
import platform
import yaml
import asyncio
import pytz
import pid
import uvloop
import appdaemon.utils as utils
import appdaemon.appdaemon as ad
import appdaemon.http as adhttp
import appdaemon.logging as logging
class ADMain:
    """
    Class to encapsulate all main() functionality.
    """

    def __init__(self):
        """Create an ADMain with every subsystem handle unset.

        The handles are populated later by main()/run(); until then they are
        all None so a partially-initialized shutdown path stays safe.
        """
        for attribute in ("logging", "error", "diag", "AD", "http_object", "logger"):
            setattr(self, attribute, None)
def init_signals(self):
"""Setup signal handling."""
# Windows does not support SIGUSR1 or SIGUSR2
if platform.system() != "Windows":
signal.signal(signal.SIGUSR1, self.handle_sig)
signal.signal(signal.SIGINT, self.handle_sig)
signal.signal(signal.SIGHUP, self.handle_sig)
signal.signal(signal.SIGTERM, self.handle_sig)
# noinspection PyUnusedLocal
def handle_sig(self, signum, frame):
"""Function to handle signals.
SIGUSR1 will result in internal info being dumped to the DIAG log
SIGHUP will force a reload of all apps
SIGINT and SIGTEM both result in AD shutting down
Args:
signum: Signal number being processed.
frame: frame - unused
Returns:
None.
"""
if signum == signal.SIGUSR1:
self.AD.thread_async.call_async_no_wait(self.AD.sched.dump_schedule)
self.AD.thread_async.call_async_no_wait(self.AD.callbacks.dump_callbacks)
self.AD.thread_async.call_async_no_wait(self.AD.threading.dump_threads)
self.AD.thread_async.call_async_no_wait(self.AD.app_management.dump_objects)
self.AD.thread_async.call_async_no_wait(self.AD.sched.dump_sun)
if signum == signal.SIGHUP:
self.AD.thread_async.call_async_no_wait(self.AD.app_management.check_app_updates, mode="term")
if signum == signal.SIGINT:
self.logger.info("Keyboard interrupt")
self.stop()
if signum == signal.SIGTERM:
self.logger.info("SIGTERM Received")
self.stop()
def stop(self):
"""Called by the signal handler to shut AD down.
Returns:
None.
"""
self.logger.info("AppDaemon is shutting down")
self.AD.stop()
if self.http_object is not None:
self.http_object.stop()
    # noinspection PyBroadException,PyBroadException
    def run(self, appdaemon, hadashboard, admin, api, http):
        """Start AppDaemon up after initial argument parsing.

        Builds the event loop, the core AppDaemon object and (optionally) the
        HTTP subsystem, then blocks until all pending tasks complete.

        Args:
            appdaemon: Config for AppDaemon Object.
            hadashboard: Config for HADashboard Object.
            admin: Config for admin Object.
            api: Config for API Object
            http: Config for HTTP Object

        Returns:
            None.
        """
        try:
            # if to use uvloop
            if appdaemon.get("uvloop") is True:
                self.logger.info("Running AD using uvloop")
                uvloop.install()
            loop = asyncio.get_event_loop()
            # Initialize AppDaemon
            self.AD = ad.AppDaemon(self.logging, loop, **appdaemon)
            # Initialize Dashboard/API/admin
            # HTTP is started only when there is at least one consumer for it.
            if http is not None and (hadashboard is not None or admin is not None or api is not False):
                self.logger.info("Initializing HTTP")
                self.http_object = adhttp.HTTP(self.AD, loop, self.logging, appdaemon, hadashboard, admin, api, http,)
                self.AD.register_http(self.http_object)
            else:
                if http is not None:
                    self.logger.info("HTTP configured but no consumers are configured - disabling")
                else:
                    self.logger.info("HTTP is disabled")
            self.logger.debug("Start Main Loop")
            # NOTE(review): asyncio.Task.all_tasks() is deprecated since 3.7
            # and removed in 3.9 - consider asyncio.all_tasks(loop); confirm
            # the supported Python versions before changing.
            pending = asyncio.Task.all_tasks()
            loop.run_until_complete(asyncio.gather(*pending))
            #
            # Now we are shutting down - perform any necessary cleanup
            #
            self.AD.terminate()
            self.logger.info("AppDaemon is stopped.")
        except Exception:
            self.logger.warning("-" * 60)
            self.logger.warning("Unexpected error during run()")
            self.logger.warning("-" * 60, exc_info=True)
            self.logger.warning("-" * 60)
        self.logger.debug("End Loop")
        self.logger.info("AppDaemon Exited")
    # noinspection PyBroadException
    def main(self):  # noqa: C901
        """Initial AppDaemon entry point.

        Parse command line arguments, load configuration, set up logging,
        then delegate to run() (optionally under a PID-file lock).
        """
        self.init_signals()
        # Get command line args
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-c", "--config", help="full path to config directory", type=str, default=None,
        )
        parser.add_argument("-p", "--pidfile", help="full path to PID File", default=None)
        parser.add_argument(
            "-t", "--timewarp", help="speed that the scheduler will work at for time travel", default=1, type=float,
        )
        parser.add_argument(
            "-s", "--starttime", help="start time for scheduler <YYYY-MM-DD HH:MM:SS|YYYY-MM-DD#HH:MM:SS>", type=str,
        )
        parser.add_argument(
            "-e",
            "--endtime",
            help="end time for scheduler <YYYY-MM-DD HH:MM:SS|YYYY-MM-DD#HH:MM:SS>",
            type=str,
            default=None,
        )
        parser.add_argument(
            "-D",
            "--debug",
            help="global debug level",
            default="INFO",
            choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        )
        # -m takes a (module, level) pair and may be repeated.
        parser.add_argument("-m", "--moduledebug", nargs=2, action="append")
        parser.add_argument("-v", "--version", action="version", version="%(prog)s " + utils.__version__)
        parser.add_argument("--profiledash", help=argparse.SUPPRESS, action="store_true")
        args = parser.parse_args()
        config_dir = args.config
        pidfile = args.pidfile
        # Resolve the configuration file: explicit -c dir, else search defaults.
        if config_dir is None:
            config_file_yaml = utils.find_path("appdaemon.yaml")
        else:
            config_file_yaml = os.path.join(config_dir, "appdaemon.yaml")
        if config_file_yaml is None:
            print("FATAL: no configuration directory defined and defaults not present\n")
            parser.print_help()
            sys.exit(1)
        module_debug = {}
        if args.moduledebug is not None:
            for arg in args.moduledebug:
                module_debug[arg[0]] = arg[1]
        #
        # First locate secrets file
        #
        try:
            #
            # Read config file using environment variables
            #
            yaml.add_constructor("!env_var", utils._env_var_yaml, Loader=yaml.SafeLoader)
            #
            # Initially load file to see if secret directive is present
            #
            # First pass uses a dummy !secret handler just to discover the
            # "secrets" directive; the real handler is installed below.
            yaml.add_constructor("!secret", utils._dummy_secret, Loader=yaml.SafeLoader)
            with open(config_file_yaml, "r") as yamlfd:
                config_file_contents = yamlfd.read()
            config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)
            if "secrets" in config:
                secrets_file = config["secrets"]
            else:
                secrets_file = os.path.join(os.path.dirname(config_file_yaml), "secrets.yaml")
            #
            # Read Secrets
            #
            if os.path.isfile(secrets_file):
                with open(secrets_file, "r") as yamlfd:
                    secrets_file_contents = yamlfd.read()
                utils.secrets = yaml.load(secrets_file_contents, Loader=yaml.SafeLoader)
            else:
                # A missing *explicitly configured* secrets file is fatal; a
                # missing default secrets.yaml is silently ignored.
                if "secrets" in config:
                    print(
                        "ERROR", "Error loading secrets file: {}".format(config["secrets"]),
                    )
                    sys.exit()
            #
            # Read config file again, this time with secrets
            #
            yaml.add_constructor("!secret", utils._secret_yaml, Loader=yaml.SafeLoader)
            with open(config_file_yaml, "r") as yamlfd:
                config_file_contents = yamlfd.read()
            config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)
        except yaml.YAMLError as exc:
            print("ERROR", "Error loading configuration")
            if hasattr(exc, "problem_mark"):
                if exc.context is not None:
                    print("ERROR", "parser says")
                    print("ERROR", str(exc.problem_mark))
                    print("ERROR", str(exc.problem) + " " + str(exc.context))
                else:
                    print("ERROR", "parser says")
                    print("ERROR", str(exc.problem_mark))
                    print("ERROR", str(exc.problem))
            sys.exit()
        if "appdaemon" not in config:
            print("ERROR", "no 'appdaemon' section in {}".format(config_file_yaml))
            sys.exit()
        appdaemon = config["appdaemon"]
        if "disable_apps" not in appdaemon:
            appdaemon["disable_apps"] = False
        # NOTE(review): config_dir set here is overwritten a few lines below
        # with os.path.dirname(config_file_yaml) - confirm which is intended.
        appdaemon["config_dir"] = config_dir
        appdaemon["config_file"] = config_file_yaml
        appdaemon["app_config_file"] = os.path.join(os.path.dirname(config_file_yaml), "apps.yaml")
        appdaemon["module_debug"] = module_debug
        # Command line overrides for scheduler time-travel settings.
        if args.starttime is not None:
            appdaemon["starttime"] = args.starttime
        if args.endtime is not None:
            appdaemon["endtime"] = args.endtime
        if "timewarp" not in appdaemon:
            appdaemon["timewarp"] = args.timewarp
        appdaemon["loglevel"] = args.debug
        appdaemon["config_dir"] = os.path.dirname(config_file_yaml)
        appdaemon["stop_function"] = self.stop
        hadashboard = None
        if "hadashboard" in config:
            if config["hadashboard"] is None:
                hadashboard = {}
            else:
                hadashboard = config["hadashboard"]
            hadashboard["profile_dashboard"] = args.profiledash
            # NOTE(review): config_dir is assigned twice here, mirroring the
            # appdaemon section above - the dirname() value wins.
            hadashboard["config_dir"] = config_dir
            hadashboard["config_file"] = config_file_yaml
            hadashboard["config_dir"] = os.path.dirname(config_file_yaml)
            if args.profiledash:
                hadashboard["profile_dashboard"] = True
            if "dashboard" not in hadashboard:
                hadashboard["dashboard"] = True
        admin = None
        if "admin" in config:
            if config["admin"] is None:
                admin = {}
            else:
                admin = config["admin"]
        api = None
        if "api" in config:
            if config["api"] is None:
                api = {}
            else:
                api = config["api"]
        http = None
        if "http" in config:
            http = config["http"]
        # Setup _logging
        if "log" in config:
            print(
                "ERROR", "'log' directive deprecated, please convert to new 'logs' syntax",
            )
            sys.exit(1)
        if "logs" in config:
            logs = config["logs"]
        else:
            logs = {}
        self.logging = logging.Logging(logs, args.debug)
        self.logger = self.logging.get_logger()
        if "time_zone" in config["appdaemon"]:
            self.logging.set_tz(pytz.timezone(config["appdaemon"]["time_zone"]))
        # Startup message
        self.logger.info("AppDaemon Version %s starting", utils.__version__)
        self.logger.info(
            "Python version is %s.%s.%s", sys.version_info[0], sys.version_info[1], sys.version_info[2],
        )
        self.logger.info("Configuration read from: %s", config_file_yaml)
        self.logging.dump_log_config()
        self.logger.debug("AppDaemon Section: %s", config.get("appdaemon"))
        self.logger.debug("HADashboard Section: %s", config.get("hadashboard"))
        # Validate the mandatory location settings; report every missing key
        # before exiting.
        # NOTE(review): 'exit' shadows the builtin - consider renaming.
        exit = False
        if "time_zone" not in config["appdaemon"]:
            self.logger.error("time_zone not specified in appdaemon.cfg")
            exit = True
        if "latitude" not in config["appdaemon"]:
            self.logger.error("latitude not specified in appdaemon.cfg")
            exit = True
        if "longitude" not in config["appdaemon"]:
            self.logger.error("longitude not specified in appdaemon.cfg")
            exit = True
        if "elevation" not in config["appdaemon"]:
            self.logger.error("elevation not specified in appdaemon.cfg")
            exit = True
        if exit is True:
            sys.exit(1)
        utils.check_path("config_file", self.logger, config_file_yaml, pathtype="file")
        if pidfile is not None:
            self.logger.info("Using pidfile: %s", pidfile)
            # NOTE(review): 'dir' shadows the builtin - consider renaming.
            dir = os.path.dirname(pidfile)
            name = os.path.basename(pidfile)
            try:
                # Hold the PID-file lock for the whole lifetime of run().
                with pid.PidFile(name, dir):
                    self.run(appdaemon, hadashboard, admin, api, http)
            except pid.PidFileError:
                self.logger.error("Unable to acquire pidfile - terminating")
        else:
            self.run(appdaemon, hadashboard, admin, api, http)
def main():
    """Command-line entry point: build an ADMain and hand over control."""
    ADMain().main()
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
<|code_end|>
appdaemon/plugins/hass/hassapi.py
<|code_start|>import requests
from ast import literal_eval
from functools import wraps
import appdaemon.adbase as adbase
import appdaemon.adapi as adapi
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# HASS installs frequently use self-signed certificates; silence urllib3's
# InsecureRequestWarning noise for those unverified HTTPS requests.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def hass_check(func):
    """Decorator that guards HASS service helpers against a dead connection.

    Looks up the plugin for the call's namespace and, if the namespace does
    not exist or Home Assistant is not currently being read, logs a warning
    and returns a no-op callable instead of invoking the wrapped function.

    NOTE(review): func_wrapper is synchronous and uses
    run_coroutine_threadsafe, which blocks the calling thread until the
    coroutine finishes on the event loop. When a decorated helper such as
    toggle()/turn_on() is invoked from an async app callback running on that
    same loop, this appears to deadlock until AD's scheduler times out (see
    the linked community issue) - confirm and consider an async-aware
    wrapper plus swapping the decorator order with utils.sync_wrapper.
    """
    @wraps(func)
    def func_wrapper(*args, **kwargs):
        # args[0] is the Hass app instance the wrapped method is bound to.
        self = args[0]
        ns = self._get_namespace(**kwargs)
        plugin = utils.run_coroutine_threadsafe(self, self.AD.plugins.get_plugin_object(ns))
        if plugin is None:
            self.logger.warning("non_existent namespace (%s) specified in call to %s", ns, func.__name__)
            return lambda *args: None
        if not utils.run_coroutine_threadsafe(self, plugin.am_reading_messages()):
            self.logger.warning("Attempt to call Home Assistant while disconnected: %s", func.__name__)
            return lambda *args: None
        else:
            return func(*args, **kwargs)
    return func_wrapper
#
# Define an entities class as a descriptor to enable read only access of HASS state
#
class Hass(adbase.ADBase, adapi.ADAPI):
    """Home Assistant flavor of the AppDaemon app base class.

    Combines the generic ADBase/ADAPI behavior with HASS-specific helpers:
    device-tracker queries, service-call conveniences (turn_on/turn_off/...),
    and presence/input_* callback constraints.
    """

    #
    # Internal
    #
    def __init__(self, ad: AppDaemon, name, logging, args, config, app_config, global_vars):
        """Initialize both parent classes and register the HASS constraints."""
        # Call Super Classes
        adbase.ADBase.__init__(self, ad, name, logging, args, config, app_config, global_vars)
        adapi.ADAPI.__init__(self, ad, name, logging, args, config, app_config, global_vars)
        self.AD = ad
        #
        # Register specific constraints
        #
        # NOTE(review): constrain_person is defined below but never
        # registered here - confirm whether it should be added.
        self.register_constraint("constrain_presence")
        self.register_constraint("constrain_input_boolean")
        self.register_constraint("constrain_input_select")
#
# Device Trackers
#
def get_trackers(self, **kwargs):
"""Returns a list of all device tracker names.
Args:
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
person (boolean, optional): If set to True, use person rather than device_tracker
as the device type to query
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Examples:
>>> trackers = self.get_trackers()
>>> for tracker in trackers:
>>> do something
>>> people = self.get_trackers(person=True)
>>> for person in people:
>>> do something
"""
if "person" in kwargs and kwargs["person"] is True:
device = "person"
del kwargs["person"]
else:
device = "device_tracker"
return (key for key, value in self.get_state(device, **kwargs).items())
def get_tracker_details(self, **kwargs):
"""Returns a list of all device trackers and their associated state.
Args:
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
person (boolean, optional): If set to True, use person rather than device_tracker
as the device type to query
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Examples:
>>> trackers = self.get_tracker_details()
>>> for tracker in trackers:
>>> do something
"""
if "person" in kwargs and kwargs["person"] is True:
device = "person"
del kwargs["person"]
else:
device = "device_tracker"
return self.get_state(device, **kwargs)
def get_tracker_state(self, entity_id, **kwargs):
"""Gets the state of a tracker.
Args:
entity_id (str): Fully qualified entity id of the device tracker or person to query, e.g.,
``device_tracker.andrew`` or ``person.andrew``.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
The values returned depend in part on the
configuration and type of device trackers in the system. Simpler tracker
types like ``Locative`` or ``NMAP`` will return one of 2 states:
- ``home``
- ``not_home``
Some types of device tracker are in addition able to supply locations
that have been configured as Geofences, in which case the name of that
location can be returned.
Examples:
>>> state = self.get_tracker_state("device_tracker.andrew")
>>> self.log("state is {}".format(state))
>>> state = self.get_tracker_state("person.andrew")
>>> self.log("state is {}".format(state))
"""
self._check_entity(self._get_namespace(**kwargs), entity_id)
return self.get_state(entity_id, **kwargs)
@utils.sync_wrapper
async def anyone_home(self, **kwargs):
"""Determines if the house/apartment is occupied.
A convenience function to determine if one or more person is home. Use
this in preference to getting the state of ``group.all_devices()`` as it
avoids a race condition when using state change callbacks for device
trackers.
Args:
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
person (boolean, optional): If set to True, use person rather than device_tracker
as the device type to query
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
Returns ``True`` if anyone is at home, ``False`` otherwise.
Examples:
>>> if self.anyone_home():
>>> do something
>>> if self.anyone_home(person=True):
>>> do something
"""
if "person" in kwargs and kwargs["person"] is True:
device = "person"
del kwargs["person"]
else:
device = "device_tracker"
state = await self.get_state(**kwargs)
for entity_id in state.keys():
thisdevice, thisentity = await self.split_entity(entity_id)
if thisdevice == device:
if state[entity_id]["state"] == "home":
return True
return False
@utils.sync_wrapper
async def everyone_home(self, **kwargs):
"""Determine if all family's members at home.
A convenience function to determine if everyone is home. Use this in
preference to getting the state of ``group.all_devices()`` as it avoids
a race condition when using state change callbacks for device trackers.
Args:
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
person (boolean, optional): If set to True, use person rather than device_tracker
as the device type to query
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
Returns ``True`` if everyone is at home, ``False`` otherwise.
Examples:
>>> if self.everyone_home():
>>> do something
>>> if self.everyone_home(person=True):
>>> do something
"""
if "person" in kwargs and kwargs["person"] is True:
device = "person"
del kwargs["person"]
else:
device = "device_tracker"
state = await self.get_state(**kwargs)
for entity_id in state.keys():
thisdevice, thisentity = await self.split_entity(entity_id)
if thisdevice == device:
if state[entity_id]["state"] != "home":
return False
return True
@utils.sync_wrapper
async def noone_home(self, **kwargs):
"""Determines if the house/apartment is empty.
A convenience function to determine if no people are at home. Use this
in preference to getting the state of ``group.all_devices()`` as it avoids
a race condition when using state change callbacks for device trackers.
Args:
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
person (boolean, optional): If set to True, use person rather than device_tracker
as the device type to query
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
Returns ``True`` if no one is home, ``False`` otherwise.
Examples:
>>> if self.noone_home():
>>> do something
>>> if self.noone_home(person=True):
>>> do something
"""
if "person" in kwargs and kwargs["person"] is True:
device = "person"
del kwargs["person"]
else:
device = "device_tracker"
state = await self.get_state(**kwargs)
for entity_id in state.keys():
thisdevice, thisentity = await self.split_entity(entity_id)
if thisdevice == device:
if state[entity_id]["state"] == "home":
return False
return True
#
# Built in constraints
#
def constrain_presence(self, value):
unconstrained = True
if value == "everyone" and not self.everyone_home():
unconstrained = False
elif value == "anyone" and not self.anyone_home():
unconstrained = False
elif value == "noone" and not self.noone_home():
unconstrained = False
return unconstrained
def constrain_person(self, value):
unconstrained = True
if value == "everyone" and not self.everyone_home(person=True):
unconstrained = False
elif value == "anyone" and not self.anyone_home(person=True):
unconstrained = False
elif value == "noone" and not self.noone_home(person=True):
unconstrained = False
return unconstrained
def constrain_input_boolean(self, value):
unconstrained = True
state = self.get_state()
values = value.split(",")
if len(values) == 2:
entity = values[0]
desired_state = values[1]
else:
entity = value
desired_state = "on"
if entity in state and state[entity]["state"] != desired_state:
unconstrained = False
return unconstrained
def constrain_input_select(self, value):
unconstrained = True
state = self.get_state()
values = value.split(",")
entity = values.pop(0)
if entity in state and state[entity]["state"] not in values:
unconstrained = False
return unconstrained
#
# Helper functions for services
#
@hass_check
@utils.sync_wrapper
async def turn_on(self, entity_id, **kwargs):
"""Turns `on` a Home Assistant entity.
This is a convenience function for the ``homeassistant.turn_on``
function. It can turn ``on`` pretty much anything in Home Assistant
that can be turned ``on`` or ``run`` (e.g., `Lights`, `Switches`,
`Scenes`, `Scripts`, etc.).
Args:
entity_id (str): Fully qualified id of the thing to be turned ``on`` (e.g.,
`light.office_lamp`, `scene.downstairs_on`).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
Turn `on` a switch.
>>> self.turn_on("switch.backyard_lights")
Turn `on` a scene.
>>> self.turn_on("scene.bedroom_on")
Turn `on` a light and set its color to green.
>>> self.turn_on("light.office_1", color_name = "green")
"""
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
await self._check_entity(namespace, entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["namespace"] = namespace
await self.call_service("homeassistant/turn_on", **rargs)
@hass_check
@utils.sync_wrapper
async def turn_off(self, entity_id, **kwargs):
"""Turns `off` a Home Assistant entity.
This is a convenience function for the ``homeassistant.turn_off``
function. It can turn ``off`` pretty much anything in Home Assistant
that can be turned ``off`` (e.g., `Lights`, `Switches`, etc.).
Args:
entity_id (str): Fully qualified id of the thing to be turned ``off`` (e.g.,
`light.office_lamp`, `scene.downstairs_on`).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
Turn `off` a switch.
>>> self.turn_off("switch.backyard_lights")
Turn `off` a scene.
>>> self.turn_off("scene.bedroom_on")
"""
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
if kwargs == {}:
rargs = {"entity_id": entity_id}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["namespace"] = namespace
device, entity = await self.split_entity(entity_id)
if device == "scene":
await self.call_service("homeassistant/turn_on", **rargs)
else:
await self.call_service("homeassistant/turn_off", **rargs)
@hass_check
@utils.sync_wrapper
async def toggle(self, entity_id, **kwargs):
"""Toggles between ``on`` and ``off`` for the selected entity.
This is a convenience function for the ``homeassistant.toggle`` function.
It is able to flip the state of pretty much anything in Home Assistant
that can be turned ``on`` or ``off``.
Args:
entity_id (str): Fully qualified id of the thing to be turned ``off`` (e.g.,
`light.office_lamp`, `scene.downstairs_on`).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
>>> self.toggle("switch.backyard_lights")
>>> self.toggle("light.office_1", color_name = "green")
"""
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
await self._check_entity(namespace, entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["namespace"] = namespace
await self.call_service("homeassistant/toggle", **rargs)
@hass_check
@utils.sync_wrapper
async def set_value(self, entity_id, value, **kwargs):
"""Sets the value of an `input_number`.
This is a convenience function for the ``input_number.set_value``
function. It can set the value of an ``input_number`` in Home Assistant.
Args:
entity_id (str): Fully qualified id of `input_number` to be changed (e.g.,
`input_number.alarm_hour`).
value (int or float): The new value to set the `input_number` to.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
>>> self.set_value("input_number.alarm_hour", 6)
"""
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
await self._check_entity(namespace, entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id, "value": value}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["value"] = value
rargs["namespace"] = namespace
await self.call_service("input_number/set_value", **rargs)
@hass_check
@utils.sync_wrapper
async def set_textvalue(self, entity_id, value, **kwargs):
"""Sets the value of an `input_text`.
This is a convenience function for the ``input_text.set_value``
function. It can set the value of an `input_text` in Home Assistant.
Args:
entity_id (str): Fully qualified id of `input_text` to be changed (e.g.,
`input_text.text1`).
value (str): The new value to set the `input_text` to.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
>>> self.set_textvalue("input_text.text1", "hello world")
"""
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
await self._check_entity(namespace, entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id, "value": value}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["value"] = value
rargs["namespace"] = namespace
await self.call_service("input_text/set_value", **rargs)
@hass_check
@utils.sync_wrapper
async def select_option(self, entity_id, option, **kwargs):
"""Sets the value of an `input_option`.
This is a convenience function for the ``input_select.select_option``
function. It can set the value of an `input_select` in Home Assistant.
Args:
entity_id (str): Fully qualified id of `input_select` to be changed (e.g.,
`input_select.mode`).
option (str): The new value to set the `input_select` to.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
>>> self.select_option("input_select.mode", "Day")
Todo:
* Is option always a str?
"""
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
await self._check_entity(namespace, entity_id)
if kwargs == {}:
rargs = {"entity_id": entity_id, "option": option}
else:
rargs = kwargs
rargs["entity_id"] = entity_id
rargs["option"] = option
rargs["namespace"] = namespace
await self.call_service("input_select/select_option", **rargs)
@hass_check
@utils.sync_wrapper
async def notify(self, message, **kwargs):
"""Sends a notification.
This is a convenience function for the ``notify.notify`` service. It
will send a notification to a named notification service. If the name is
not specified, it will default to ``notify/notify``.
Args:
message (str): Message to be sent to the notification service.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
title (str, optional): Title of the notification.
name (str, optional): Name of the notification service.
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
>>> self.notify("Switching mode to Evening")
>>> self.notify("Switching mode to Evening", title = "Some Subject", name = "smtp")
# will send a message through notify.smtp instead of the default notify.notify
"""
kwargs["message"] = message
if "name" in kwargs:
service = "notify/{}".format(kwargs["name"])
del kwargs["name"]
else:
service = "notify/notify"
await self.call_service(service, **kwargs)
@hass_check
@utils.sync_wrapper
async def persistent_notification(self, message, title=None, id=None):
"""
Args:
message:
title:
id:
Returns:
Todo:
* Finish
"""
kwargs = {"message": message}
if title is not None:
kwargs["title"] = title
if id is not None:
kwargs["notification_id"] = id
await self.call_service("persistent_notification/create", **kwargs)
    @hass_check
    @utils.sync_wrapper
    async def get_history(self, **kwargs):
        """Gets access to the HA Database.

        This is a convenience function that allows accessing the HA Database, so the
        history state of a device can be retrieved. It allows for a level of flexibility
        when retrieving the data, and returns it as a dictionary list. Caution must be
        taken when using this, as depending on the size of the database, it can take
        a long time to process.

        Args:
            **kwargs (optional): Zero or more keyword arguments.

        Keyword Args:
            entity_id (str, optional): Fully qualified id of the device to be querying, e.g.,
                ``light.office_lamp`` or ``scene.downstairs_on`` This can be any entity_id
                in the database. If this is left empty, the state of all entities will be
                retrieved within the specified time. If both ``end_time`` and ``start_time``
                explained below are declared, and ``entity_id`` is specified, the specified
                ``entity_id`` will be ignored and the history states of `all` entity_id in
                the database will be retrieved within the specified time.
            days (int, optional): The days from the present-day walking backwards that is
                required from the database.
            start_time (optional): The start time from when the data should be retrieved.
                This should be the furthest time backwards, like if we wanted to get data from
                now until two days ago. Your start time will be the last two days datetime.
                ``start_time`` time can be either a UTC aware time string like ``2019-04-16 12:00:03+01:00``
                or a ``datetime.datetime`` object.
            end_time (optional): The end time from when the data should be retrieved. This should
                be the latest time like if we wanted to get data from now until two days ago. Your
                end time will be today's datetime ``end_time`` time can be either a UTC aware time
                string like ``2019-04-16 12:00:03+01:00`` or a ``datetime.datetime`` object. It should
                be noted that it is not possible to declare only ``end_time``. If only ``end_time``
                is declared without ``start_time`` or ``days``, it will revert to default to the latest
                history state. When ``end_time`` is specified, it is not possible to declare ``entity_id``.
                If ``entity_id`` is specified, ``end_time`` will be ignored.
            callback (callable, optional): If wanting to access the database to get a large amount of data,
                using a direct call to this function will take a long time to run and lead to AD cancelling the task.
                To get around this, it is better to pass a function, which will be responsible of receiving the result
                from the database. The signature of this function follows that of a scheduler call.
            namespace (str, optional): Namespace to use for the call. See the section on
                `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
                In most cases it is safe to ignore this parameter.

        Returns:
            An iterable list of entity_ids and their history state.

        Examples:
            Get device state over the last 5 days.

            >>> data = self.get_history(entity_id = "light.office_lamp", days = 5)

            Get device state over the last 2 days and walk forward.

            >>> import datetime
            >>> from datetime import timedelta
            >>> start_time = datetime.datetime.now() - timedelta(days = 2)
            >>> data = self.get_history(entity_id = "light.office_lamp", start_time = start_time)

            Get device state from yesterday and walk 5 days back.

            >>> import datetime
            >>> from datetime import timedelta
            >>> end_time = datetime.datetime.now() - timedelta(days = 1)
            >>> data = self.get_history(end_time = end_time, days = 5)
        """
        namespace = self._get_namespace(**kwargs)
        plugin = await self.AD.plugins.get_plugin_object(namespace)
        # Only namespaces whose plugin exposes get_history can serve this call.
        if hasattr(plugin, "get_history"):
            # NOTE(review): a "callback" value that is not callable is silently
            # dropped and the call blocks instead - confirm intended behavior.
            callback = kwargs.pop("callback", None)
            if callback is not None and callable(callback):
                # Large queries: run in the background and deliver via callback.
                self.create_task(plugin.get_history(**kwargs), callback)
            else:
                return await plugin.get_history(**kwargs)
        else:
            self.logger.warning(
                "Wrong Namespace selected, as %s has no database plugin attached to it", namespace,
            )
            return None
@hass_check
def render_template(self, template, **kwargs):
    """Render a Home Assistant template and return the result.

    The rendered string is converted to a native Python type where
    possible (e.g. ``"True"`` -> ``True``, ``"97.2"`` -> ``97.2``);
    when conversion fails the raw rendered string is returned.

    Args:
        template (str): The Home Assistant Template to be rendered.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        The rendered template in a native Python type.

    Examples:
        >>> self.render_template("{{ states('sun.sun') }}")
        Returns (str) above_horizon

        >>> self.render_template("{{ is_state('sun.sun', 'above_horizon') }}")
        Returns (bool) True

        >>> self.render_template("{{ states('sensor.outside_temp') }}")
        Returns (float) 97.2
    """
    service_args = dict(kwargs)
    # _get_namespace() resolves the effective namespace (explicit kwarg
    # or the app default); overwrite any caller-supplied value with it.
    service_args["namespace"] = self._get_namespace(**kwargs)
    service_args["template"] = template
    rendered = self.call_service("template/render", **service_args)
    try:
        # Plain strings such as "above_horizon" are not Python literals;
        # literal_eval raises and we fall back to the raw string.
        return literal_eval(rendered)
    except (SyntaxError, ValueError):
        return rendered
<|code_end|>
| appdaemon/__main__.py
<|code_start|>#!/usr/bin/python3
"""AppDaemon main() module.
AppDaemon module that contains main() along with argument parsing, instantiation of the AppDaemon and HTTP Objects,
also creates the loop and kicks everything off
"""
import argparse
import asyncio
import os
import os.path
import platform
import signal
import sys
import appdaemon.appdaemon as ad
import appdaemon.http as adhttp
import appdaemon.logging as logging
import appdaemon.utils as utils
import pytz
import yaml
try:
import pid
except ImportError:
pid = None
try:
import uvloop
except ImportError:
uvloop = None
class ADMain:
    """
    Class to encapsulate all main() functionality.
    """

    def __init__(self):
        """Constructor."""
        # All attributes are populated later by main()/run(); they are
        # declared here so the object's shape is explicit.
        self.logging = None
        self.error = None
        self.diag = None
        self.AD = None
        self.http_object = None
        self.logger = None

    def init_signals(self):
        """Setup signal handling."""
        # Windows does not support SIGUSR1 or SIGUSR2
        if platform.system() != "Windows":
            signal.signal(signal.SIGUSR1, self.handle_sig)
            signal.signal(signal.SIGINT, self.handle_sig)
            signal.signal(signal.SIGHUP, self.handle_sig)
            signal.signal(signal.SIGTERM, self.handle_sig)

    # noinspection PyUnusedLocal
    def handle_sig(self, signum, frame):
        """Function to handle signals.

        SIGUSR1 will result in internal info being dumped to the DIAG log
        SIGHUP will force a reload of all apps
        SIGINT and SIGTERM both result in AD shutting down

        Args:
            signum: Signal number being processed.
            frame: frame - unused

        Returns:
            None.
        """
        if signum == signal.SIGUSR1:
            # Dump internal state through the async worker so the signal
            # handler itself never blocks on the event loop.
            self.AD.thread_async.call_async_no_wait(self.AD.sched.dump_schedule)
            self.AD.thread_async.call_async_no_wait(self.AD.callbacks.dump_callbacks)
            self.AD.thread_async.call_async_no_wait(self.AD.threading.dump_threads)
            self.AD.thread_async.call_async_no_wait(self.AD.app_management.dump_objects)
            self.AD.thread_async.call_async_no_wait(self.AD.sched.dump_sun)
        if signum == signal.SIGHUP:
            # Force a terminate-and-reload pass over all apps.
            self.AD.thread_async.call_async_no_wait(self.AD.app_management.check_app_updates, mode="term")
        if signum == signal.SIGINT:
            self.logger.info("Keyboard interrupt")
            self.stop()
        if signum == signal.SIGTERM:
            self.logger.info("SIGTERM Received")
            self.stop()

    def stop(self):
        """Called by the signal handler to shut AD down.

        Returns:
            None.
        """
        self.logger.info("AppDaemon is shutting down")
        self.AD.stop()
        if self.http_object is not None:
            self.http_object.stop()

    # noinspection PyBroadException,PyBroadException
    def run(self, appdaemon, hadashboard, admin, api, http):
        """ Start AppDaemon up after initial argument parsing.

        Args:
            appdaemon: Config for AppDaemon Object.
            hadashboard: Config for HADashboard Object.
            admin: Config for admin Object.
            api: Config for API Object
            http: Config for HTTP Object

        Returns:
            None.
        """
        try:
            # if to use uvloop
            if appdaemon.get("uvloop") is True and uvloop:
                self.logger.info("Running AD using uvloop")
                uvloop.install()
            loop = asyncio.get_event_loop()
            # Initialize AppDaemon
            self.AD = ad.AppDaemon(self.logging, loop, **appdaemon)
            # Initialize Dashboard/API/admin
            # HTTP is only started when at least one consumer is configured.
            if http is not None and (hadashboard is not None or admin is not None or api is not False):
                self.logger.info("Initializing HTTP")
                self.http_object = adhttp.HTTP(self.AD, loop, self.logging, appdaemon, hadashboard, admin, api, http,)
                self.AD.register_http(self.http_object)
            else:
                if http is not None:
                    self.logger.info("HTTP configured but no consumers are configured - disabling")
                else:
                    self.logger.info("HTTP is disabled")
            self.logger.debug("Start Main Loop")
            # NOTE(review): asyncio.Task.all_tasks() is deprecated since
            # Python 3.7 in favor of asyncio.all_tasks() — confirm target
            # Python version before changing.
            pending = asyncio.Task.all_tasks()
            loop.run_until_complete(asyncio.gather(*pending))
            #
            # Now we are shutting down - perform any necessary cleanup
            #
            self.AD.terminate()
            self.logger.info("AppDaemon is stopped.")
        except Exception:
            self.logger.warning("-" * 60)
            self.logger.warning("Unexpected error during run()")
            self.logger.warning("-" * 60, exc_info=True)
            self.logger.warning("-" * 60)
        self.logger.debug("End Loop")
        self.logger.info("AppDaemon Exited")

    # noinspection PyBroadException
    def main(self):  # noqa: C901
        """Initial AppDaemon entry point.

        Parse command line arguments, load configuration, set up logging.
        """
        self.init_signals()
        # Get command line args
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-c", "--config", help="full path to config directory", type=str, default=None,
        )
        parser.add_argument("-p", "--pidfile", help="full path to PID File", default=None)
        parser.add_argument(
            "-t", "--timewarp", help="speed that the scheduler will work at for time travel", default=1, type=float,
        )
        parser.add_argument(
            "-s", "--starttime", help="start time for scheduler <YYYY-MM-DD HH:MM:SS|YYYY-MM-DD#HH:MM:SS>", type=str,
        )
        parser.add_argument(
            "-e",
            "--endtime",
            help="end time for scheduler <YYYY-MM-DD HH:MM:SS|YYYY-MM-DD#HH:MM:SS>",
            type=str,
            default=None,
        )
        parser.add_argument(
            "-D",
            "--debug",
            help="global debug level",
            default="INFO",
            choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        )
        parser.add_argument("-m", "--moduledebug", nargs=2, action="append")
        parser.add_argument("-v", "--version", action="version", version="%(prog)s " + utils.__version__)
        parser.add_argument("--profiledash", help=argparse.SUPPRESS, action="store_true")
        args = parser.parse_args()
        config_dir = args.config
        pidfile = args.pidfile
        if config_dir is None:
            config_file_yaml = utils.find_path("appdaemon.yaml")
        else:
            config_file_yaml = os.path.join(config_dir, "appdaemon.yaml")
        if config_file_yaml is None:
            print("FATAL: no configuration directory defined and defaults not present\n")
            parser.print_help()
            sys.exit(1)
        # -m/--moduledebug pairs become {module_name: level}.
        module_debug = {}
        if args.moduledebug is not None:
            for arg in args.moduledebug:
                module_debug[arg[0]] = arg[1]
        #
        # First locate secrets file
        #
        try:
            #
            # Read config file using environment variables
            #
            yaml.add_constructor("!env_var", utils._env_var_yaml, Loader=yaml.SafeLoader)
            #
            # Initially load file to see if secret directive is present
            #
            # The dummy !secret constructor lets this first parse succeed
            # before the real secrets file has been read.
            yaml.add_constructor("!secret", utils._dummy_secret, Loader=yaml.SafeLoader)
            with open(config_file_yaml, "r") as yamlfd:
                config_file_contents = yamlfd.read()
            config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)
            if "secrets" in config:
                secrets_file = config["secrets"]
            else:
                secrets_file = os.path.join(os.path.dirname(config_file_yaml), "secrets.yaml")
            #
            # Read Secrets
            #
            if os.path.isfile(secrets_file):
                with open(secrets_file, "r") as yamlfd:
                    secrets_file_contents = yamlfd.read()
                utils.secrets = yaml.load(secrets_file_contents, Loader=yaml.SafeLoader)
            else:
                # Only an error if the user explicitly configured a
                # secrets file; a missing default secrets.yaml is fine.
                if "secrets" in config:
                    print(
                        "ERROR", "Error loading secrets file: {}".format(config["secrets"]),
                    )
                    sys.exit()
            #
            # Read config file again, this time with secrets
            #
            yaml.add_constructor("!secret", utils._secret_yaml, Loader=yaml.SafeLoader)
            with open(config_file_yaml, "r") as yamlfd:
                config_file_contents = yamlfd.read()
            config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)
        except yaml.YAMLError as exc:
            print("ERROR", "Error loading configuration")
            if hasattr(exc, "problem_mark"):
                if exc.context is not None:
                    print("ERROR", "parser says")
                    print("ERROR", str(exc.problem_mark))
                    print("ERROR", str(exc.problem) + " " + str(exc.context))
                else:
                    print("ERROR", "parser says")
                    print("ERROR", str(exc.problem_mark))
                    print("ERROR", str(exc.problem))
            sys.exit()
        if "appdaemon" not in config:
            print("ERROR", "no 'appdaemon' section in {}".format(config_file_yaml))
            sys.exit()
        appdaemon = config["appdaemon"]
        if "disable_apps" not in appdaemon:
            appdaemon["disable_apps"] = False
        appdaemon["config_dir"] = config_dir
        appdaemon["config_file"] = config_file_yaml
        appdaemon["app_config_file"] = os.path.join(os.path.dirname(config_file_yaml), "apps.yaml")
        appdaemon["module_debug"] = module_debug
        if args.starttime is not None:
            appdaemon["starttime"] = args.starttime
        if args.endtime is not None:
            appdaemon["endtime"] = args.endtime
        if "timewarp" not in appdaemon:
            appdaemon["timewarp"] = args.timewarp
        appdaemon["loglevel"] = args.debug
        # NOTE(review): this overwrites the config_dir value assigned a few
        # lines above with the directory derived from the config file path.
        appdaemon["config_dir"] = os.path.dirname(config_file_yaml)
        appdaemon["stop_function"] = self.stop
        hadashboard = None
        if "hadashboard" in config:
            if config["hadashboard"] is None:
                hadashboard = {}
            else:
                hadashboard = config["hadashboard"]
            hadashboard["profile_dashboard"] = args.profiledash
            hadashboard["config_dir"] = config_dir
            hadashboard["config_file"] = config_file_yaml
            hadashboard["config_dir"] = os.path.dirname(config_file_yaml)
            if args.profiledash:
                hadashboard["profile_dashboard"] = True
            if "dashboard" not in hadashboard:
                hadashboard["dashboard"] = True
        admin = None
        if "admin" in config:
            if config["admin"] is None:
                admin = {}
            else:
                admin = config["admin"]
        api = None
        if "api" in config:
            if config["api"] is None:
                api = {}
            else:
                api = config["api"]
        http = None
        if "http" in config:
            http = config["http"]
        # Setup _logging
        if "log" in config:
            print(
                "ERROR", "'log' directive deprecated, please convert to new 'logs' syntax",
            )
            sys.exit(1)
        if "logs" in config:
            logs = config["logs"]
        else:
            logs = {}
        self.logging = logging.Logging(logs, args.debug)
        self.logger = self.logging.get_logger()
        if "time_zone" in config["appdaemon"]:
            self.logging.set_tz(pytz.timezone(config["appdaemon"]["time_zone"]))
        # Startup message
        self.logger.info("AppDaemon Version %s starting", utils.__version__)
        self.logger.info(
            "Python version is %s.%s.%s", sys.version_info[0], sys.version_info[1], sys.version_info[2],
        )
        self.logger.info("Configuration read from: %s", config_file_yaml)
        self.logging.dump_log_config()
        self.logger.debug("AppDaemon Section: %s", config.get("appdaemon"))
        self.logger.debug("HADashboard Section: %s", config.get("hadashboard"))
        # NOTE(review): "exit" shadows the builtin; harmless here but worth
        # renaming in a behavior-changing pass.
        exit = False
        if "time_zone" not in config["appdaemon"]:
            self.logger.error("time_zone not specified in appdaemon.cfg")
            exit = True
        if "latitude" not in config["appdaemon"]:
            self.logger.error("latitude not specified in appdaemon.cfg")
            exit = True
        if "longitude" not in config["appdaemon"]:
            self.logger.error("longitude not specified in appdaemon.cfg")
            exit = True
        if "elevation" not in config["appdaemon"]:
            self.logger.error("elevation not specified in appdaemon.cfg")
            exit = True
        if exit is True:
            sys.exit(1)
        utils.check_path("config_file", self.logger, config_file_yaml, pathtype="file")
        if pidfile is not None:
            # NOTE(review): "pid" is an optional import (may be None if the
            # package is missing) and "dir"/"name" shadow builtins — confirm
            # the optional dependency is guaranteed when -p is used.
            self.logger.info("Using pidfile: %s", pidfile)
            dir = os.path.dirname(pidfile)
            name = os.path.basename(pidfile)
            try:
                with pid.PidFile(name, dir):
                    self.run(appdaemon, hadashboard, admin, api, http)
            except pid.PidFileError:
                self.logger.error("Unable to acquire pidfile - terminating")
        else:
            self.run(appdaemon, hadashboard, admin, api, http)
def main():
    """Command-line entry point: build an ADMain object and hand over control."""
    entry = ADMain()
    entry.main()


if __name__ == "__main__":
    main()
<|code_end|>
appdaemon/plugins/hass/hassapi.py
<|code_start|>import requests
from ast import literal_eval
from functools import wraps
import appdaemon.adbase as adbase
import appdaemon.adapi as adapi
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def hass_check(coro):
    """Decorator that guards Home Assistant API coroutines.

    Before the wrapped coroutine runs, verify that the target namespace
    has a plugin and that the plugin is currently reading messages
    (i.e. connected to Home Assistant). On failure a warning is logged
    and ``None`` is returned instead of calling the wrapped coroutine.
    """

    @wraps(coro)
    async def coro_wrapper(*args, **kwargs):
        app = args[0]
        ns = app._get_namespace(**kwargs)
        plugin = await app.AD.plugins.get_plugin_object(ns)
        if plugin is None:
            app.logger.warning("non_existent namespace (%s) specified in call to %s", ns, coro.__name__)
            return None
        if await plugin.am_reading_messages():
            return await coro(*args, **kwargs)
        app.logger.warning("Attempt to call Home Assistant while disconnected: %s", coro.__name__)
        return None

    return coro_wrapper
#
# Define an entities class as a descriptor to enable read only access of HASS state
#
class Hass(adbase.ADBase, adapi.ADAPI):
#
# Internal
#
def __init__(self, ad: AppDaemon, name, logging, args, config, app_config, global_vars):
    """Initialize both API base classes and register HASS-specific constraints."""
    super_args = (ad, name, logging, args, config, app_config, global_vars)
    adbase.ADBase.__init__(self, *super_args)
    adapi.ADAPI.__init__(self, *super_args)
    self.AD = ad
    # Constraints provided by the Home Assistant plugin.
    for constraint in ("constrain_presence", "constrain_input_boolean", "constrain_input_select"):
        self.register_constraint(constraint)
#
# Device Trackers
#
def get_trackers(self, **kwargs):
    """Return an iterable of all device tracker (or person) entity ids.

    Keyword Args:
        person (boolean, optional): If set to True, use person rather than
            device_tracker as the device type to query.
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Examples:
        >>> trackers = self.get_trackers()
        >>> for tracker in trackers:
        >>>     do something

        >>> people = self.get_trackers(person=True)
        >>> for person in people:
        >>>     do something
    """
    if kwargs.get("person") is True:
        del kwargs["person"]
        device_type = "person"
    else:
        device_type = "device_tracker"
    # Iterating the state dict yields its keys, i.e. the entity ids.
    return (entity_id for entity_id in self.get_state(device_type, **kwargs))
def get_tracker_details(self, **kwargs):
    """Return all device trackers (or persons) with their associated state.

    Keyword Args:
        person (boolean, optional): If set to True, use person rather than
            device_tracker as the device type to query.
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Examples:
        >>> trackers = self.get_tracker_details()
        >>> for tracker in trackers:
        >>>     do something
    """
    if kwargs.get("person") is True:
        del kwargs["person"]
        device_type = "person"
    else:
        device_type = "device_tracker"
    return self.get_state(device_type, **kwargs)
def get_tracker_state(self, entity_id, **kwargs):
    """Return the state of a single device tracker or person entity.

    Args:
        entity_id (str): Fully qualified entity id of the device tracker or
            person to query, e.g. ``device_tracker.andrew`` or ``person.andrew``.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        The possible values depend on the tracker type: simple trackers such
        as ``Locative`` or ``NMAP`` report ``home`` or ``not_home``, while
        trackers with Geofence support may also report a configured location
        name.

    Examples:
        >>> state = self.get_tracker_state("device_tracker.andrew")
        >>> self.log("state is {}".format(state))

        >>> state = self.get_tracker_state("person.andrew")
        >>> self.log("state is {}".format(state))
    """
    namespace = self._get_namespace(**kwargs)
    # Warn on unknown entity ids before querying state.
    self._check_entity(namespace, entity_id)
    return self.get_state(entity_id, **kwargs)
@utils.sync_wrapper
async def anyone_home(self, **kwargs):
    """Return ``True`` if at least one tracked device or person is ``home``.

    Prefer this over inspecting the state of ``group.all_devices()``, as it
    avoids a race condition when using state change callbacks for device
    trackers.

    Keyword Args:
        person (boolean, optional): If set to True, use person rather than
            device_tracker as the device type to query.
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        ``True`` if anyone is at home, ``False`` otherwise.

    Examples:
        >>> if self.anyone_home():
        >>>     do something

        >>> if self.anyone_home(person=True):
        >>>     do something
    """
    if kwargs.get("person") is True:
        del kwargs["person"]
        device_type = "person"
    else:
        device_type = "device_tracker"
    state = await self.get_state(**kwargs)
    for entity_id, entity_state in state.items():
        this_device, _ = await self.split_entity(entity_id)
        if this_device == device_type and entity_state["state"] == "home":
            return True
    return False
@utils.sync_wrapper
async def everyone_home(self, **kwargs):
    """Return ``True`` only if every tracked device or person is ``home``.

    Prefer this over inspecting the state of ``group.all_devices()``, as it
    avoids a race condition when using state change callbacks for device
    trackers.

    Keyword Args:
        person (boolean, optional): If set to True, use person rather than
            device_tracker as the device type to query.
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        ``True`` if everyone is at home, ``False`` otherwise.

    Examples:
        >>> if self.everyone_home():
        >>>     do something

        >>> if self.everyone_home(person=True):
        >>>     do something
    """
    if kwargs.get("person") is True:
        del kwargs["person"]
        device_type = "person"
    else:
        device_type = "device_tracker"
    state = await self.get_state(**kwargs)
    for entity_id, entity_state in state.items():
        this_device, _ = await self.split_entity(entity_id)
        if this_device == device_type and entity_state["state"] != "home":
            return False
    return True
@utils.sync_wrapper
async def noone_home(self, **kwargs):
    """Return ``True`` if no tracked device or person is ``home``.

    Prefer this over inspecting the state of ``group.all_devices()``, as it
    avoids a race condition when using state change callbacks for device
    trackers.

    Keyword Args:
        person (boolean, optional): If set to True, use person rather than
            device_tracker as the device type to query.
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        ``True`` if no one is home, ``False`` otherwise.

    Examples:
        >>> if self.noone_home():
        >>>     do something

        >>> if self.noone_home(person=True):
        >>>     do something
    """
    if kwargs.get("person") is True:
        del kwargs["person"]
        device_type = "person"
    else:
        device_type = "device_tracker"
    state = await self.get_state(**kwargs)
    for entity_id, entity_state in state.items():
        this_device, _ = await self.split_entity(entity_id)
        if this_device == device_type and entity_state["state"] == "home":
            return False
    return True
#
# Built in constraints
#
def constrain_presence(self, value):
    """Built-in constraint: gate callbacks on household presence.

    ``value`` may be ``everyone``, ``anyone`` or ``noone``; any other value
    leaves the callback unconstrained.
    """
    if value == "everyone":
        return bool(self.everyone_home())
    if value == "anyone":
        return bool(self.anyone_home())
    if value == "noone":
        return bool(self.noone_home())
    return True
def constrain_person(self, value):
    """Built-in constraint: like ``constrain_presence`` but queries person entities."""
    if value == "everyone":
        return bool(self.everyone_home(person=True))
    if value == "anyone":
        return bool(self.anyone_home(person=True))
    if value == "noone":
        return bool(self.noone_home(person=True))
    return True
def constrain_input_boolean(self, value):
    """Built-in constraint: gate callbacks on an input_boolean's state.

    ``value`` is either ``entity`` (desired state defaults to ``on``) or
    ``entity,desired_state``.
    """
    state = self.get_state()
    parts = value.split(",")
    if len(parts) == 2:
        entity, desired_state = parts
    else:
        entity, desired_state = value, "on"
    if entity in state and state[entity]["state"] != desired_state:
        return False
    return True
def constrain_input_select(self, value):
    """Built-in constraint: gate callbacks on an input_select's state.

    ``value`` is ``entity,option1[,option2...]``; the callback is only
    unconstrained when the entity's state is one of the listed options.
    """
    state = self.get_state()
    options = value.split(",")
    entity = options.pop(0)
    if entity in state and state[entity]["state"] not in options:
        return False
    return True
#
# Helper functions for services
#
@utils.sync_wrapper
@hass_check
async def turn_on(self, entity_id, **kwargs):
    """Turn ``on`` a Home Assistant entity.

    Convenience wrapper around the ``homeassistant.turn_on`` service; it can
    turn ``on`` pretty much anything in Home Assistant that can be turned
    ``on`` or ``run`` (Lights, Switches, Scenes, Scripts, ...).

    Args:
        entity_id (str): Fully qualified id of the thing to be turned ``on``
            (e.g. ``light.office_lamp``, ``scene.downstairs_on``).

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.
        **kwargs: Additional service data, e.g. ``color_name="green"``.

    Returns:
        None.

    Examples:
        >>> self.turn_on("switch.backyard_lights")
        >>> self.turn_on("scene.bedroom_on")
        >>> self.turn_on("light.office_1", color_name = "green")
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    # Warn on unknown entity ids before calling the service.
    await self._check_entity(namespace, entity_id)
    service_args = dict(kwargs)
    service_args["entity_id"] = entity_id
    service_args["namespace"] = namespace
    await self.call_service("homeassistant/turn_on", **service_args)
@utils.sync_wrapper
@hass_check
async def turn_off(self, entity_id, **kwargs):
    """Turns `off` a Home Assistant entity.

    This is a convenience function for the ``homeassistant.turn_off``
    function. It can turn ``off`` pretty much anything in Home Assistant
    that can be turned ``off`` (e.g., `Lights`, `Switches`, etc.). For a
    ``scene`` entity the request is routed to ``homeassistant/turn_on``
    instead, since scenes only support being activated.

    Args:
        entity_id (str): Fully qualified id of the thing to be turned ``off`` (e.g.,
            `light.office_lamp`, `scene.downstairs_on`).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        Turn `off` a switch.

        >>> self.turn_off("switch.backyard_lights")

        Turn `off` a scene.

        >>> self.turn_off("scene.bedroom_on")
    """
    namespace = self._get_namespace(**kwargs)
    if "namespace" in kwargs:
        del kwargs["namespace"]
    # Warn on unknown entity ids, consistent with turn_on()/toggle()
    # (this check was previously missing from turn_off only).
    await self._check_entity(namespace, entity_id)
    if kwargs == {}:
        rargs = {"entity_id": entity_id}
    else:
        rargs = kwargs
        rargs["entity_id"] = entity_id
    rargs["namespace"] = namespace
    device, entity = await self.split_entity(entity_id)
    if device == "scene":
        # Scenes cannot be turned off; "turning off" a scene activates it.
        await self.call_service("homeassistant/turn_on", **rargs)
    else:
        await self.call_service("homeassistant/turn_off", **rargs)
@utils.sync_wrapper
@hass_check
async def toggle(self, entity_id, **kwargs):
    """Toggle the selected entity between ``on`` and ``off``.

    Convenience wrapper around the ``homeassistant.toggle`` service; it can
    flip the state of pretty much anything in Home Assistant that can be
    turned ``on`` or ``off``.

    Args:
        entity_id (str): Fully qualified id of the thing to be toggled
            (e.g. ``light.office_lamp``, ``switch.backyard_lights``).

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        >>> self.toggle("switch.backyard_lights")
        >>> self.toggle("light.office_1", color_name = "green")
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    await self._check_entity(namespace, entity_id)
    service_args = dict(kwargs)
    service_args["entity_id"] = entity_id
    service_args["namespace"] = namespace
    await self.call_service("homeassistant/toggle", **service_args)
@utils.sync_wrapper
@hass_check
async def set_value(self, entity_id, value, **kwargs):
    """Set the value of an `input_number`.

    Convenience wrapper around the ``input_number.set_value`` service.

    Args:
        entity_id (str): Fully qualified id of the `input_number` to be
            changed (e.g. ``input_number.alarm_hour``).
        value (int or float): The new value to set the `input_number` to.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        >>> self.set_value("input_number.alarm_hour", 6)
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    await self._check_entity(namespace, entity_id)
    service_args = dict(kwargs)
    service_args["entity_id"] = entity_id
    service_args["value"] = value
    service_args["namespace"] = namespace
    await self.call_service("input_number/set_value", **service_args)
@utils.sync_wrapper
@hass_check
async def set_textvalue(self, entity_id, value, **kwargs):
    """Set the value of an `input_text`.

    Convenience wrapper around the ``input_text.set_value`` service.

    Args:
        entity_id (str): Fully qualified id of the `input_text` to be
            changed (e.g. ``input_text.text1``).
        value (str): The new value to set the `input_text` to.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        >>> self.set_textvalue("input_text.text1", "hello world")
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    await self._check_entity(namespace, entity_id)
    service_args = dict(kwargs)
    service_args["entity_id"] = entity_id
    service_args["value"] = value
    service_args["namespace"] = namespace
    await self.call_service("input_text/set_value", **service_args)
@utils.sync_wrapper
@hass_check
async def select_option(self, entity_id, option, **kwargs):
    """Set the selected option of an `input_select`.

    Convenience wrapper around the ``input_select.select_option`` service.

    Args:
        entity_id (str): Fully qualified id of the `input_select` to be
            changed (e.g. ``input_select.mode``).
        option (str): The new value to set the `input_select` to.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        >>> self.select_option("input_select.mode", "Day")

    Todo:
        * Is option always a str?
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    await self._check_entity(namespace, entity_id)
    service_args = dict(kwargs)
    service_args["entity_id"] = entity_id
    service_args["option"] = option
    service_args["namespace"] = namespace
    await self.call_service("input_select/select_option", **service_args)
@utils.sync_wrapper
@hass_check
async def notify(self, message, **kwargs):
    """Send a notification through a Home Assistant notify service.

    Convenience wrapper for the ``notify.notify`` service family. When no
    service name is given, ``notify/notify`` is used.

    Args:
        message (str): Message to be sent to the notification service.

    Keyword Args:
        title (str, optional): Title of the notification.
        name (str, optional): Name of the notification service.
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        >>> self.notify("Switching mode to Evening")
        >>> self.notify("Switching mode to Evening", title = "Some Subject", name = "smtp")
        # will send a message through notify.smtp instead of the default notify.notify
    """
    kwargs["message"] = message
    if "name" in kwargs:
        # A named service routes through notify.<name>.
        service = "notify/{}".format(kwargs.pop("name"))
    else:
        service = "notify/notify"
    await self.call_service(service, **kwargs)
@utils.sync_wrapper
@hass_check
async def persistent_notification(self, message, title=None, id=None):
    """Create a Home Assistant persistent notification.

    Convenience wrapper around the ``persistent_notification.create`` service.

    Args:
        message (str): Body of the notification.
        title (str, optional): Title of the notification.
        id (optional): Notification id; passed through as ``notification_id``
            so that reusing an id replaces the previous notification.

    Returns:
        None.
    """
    service_args = {"message": message}
    if title is not None:
        service_args["title"] = title
    if id is not None:
        service_args["notification_id"] = id
    await self.call_service("persistent_notification/create", **service_args)
@utils.sync_wrapper
@hass_check
async def get_history(self, **kwargs):
    """Query the Home Assistant database for historical state data.

    Allows flexible access to the HA database so the history state of a
    device can be retrieved as a dictionary list. Use with caution:
    depending on the size of the database this can take a long time.

    Keyword Args:
        entity_id (str, optional): Fully qualified id of the entity to query,
            e.g. ``light.office_lamp`` or ``scene.downstairs_on``. If omitted,
            the state of all entities within the specified time window is
            retrieved. If both ``start_time`` and ``end_time`` are declared
            together with ``entity_id``, the ``entity_id`` is ignored and the
            history of `all` entities within that window is returned.
        days (int, optional): Number of days back from the present day to query.
        start_time (optional): Earliest time to retrieve data from, either a
            UTC-aware time string like ``2019-04-16 12:00:03+01:00`` or a
            ``datetime.datetime`` object.
        end_time (optional): Latest time to retrieve data from (same formats as
            ``start_time``). It cannot be declared on its own: without
            ``start_time`` or ``days`` the latest history state is returned,
            and when ``entity_id`` is specified ``end_time`` is ignored.
        callback (callable, optional): For large queries, pass a callable with
            a scheduler-call signature to receive the result asynchronously;
            a long direct call may otherwise be cancelled by AD.
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        An iterable list of entity_ids and their history state, or ``None``
        when the namespace has no database plugin or a callback is used.

    Examples:
        Get device state over the last 5 days.

        >>> data = self.get_history("light.office_lamp", days = 5)

        Get device state over the last 2 days and walk forward.

        >>> import datetime
        >>> from datetime import timedelta
        >>> start_time = datetime.datetime.now() - timedelta(days = 2)
        >>> data = self.get_history("light.office_lamp", start_time = start_time)

        Get device state from yesterday and walk 5 days back.

        >>> import datetime
        >>> from datetime import timedelta
        >>> end_time = datetime.datetime.now() - timedelta(days = 1)
        >>> data = self.get_history(end_time = end_time, days = 5)
    """
    namespace = self._get_namespace(**kwargs)
    plugin = await self.AD.plugins.get_plugin_object(namespace)
    if not hasattr(plugin, "get_history"):
        # Only database-capable plugins (e.g. HASS) expose get_history.
        self.logger.warning(
            "Wrong Namespace selected, as %s has no database plugin attached to it", namespace,
        )
        return None
    callback = kwargs.pop("callback", None)
    if callable(callback):
        # Large queries: deliver the result to the callback via a task
        # instead of blocking the caller.
        self.create_task(plugin.get_history(**kwargs), callback)
    else:
        return await plugin.get_history(**kwargs)
@utils.sync_wrapper
@hass_check
async def render_template(self, template, **kwargs):
    """Render a Home Assistant template via the ``template/render`` service.

    Args:
        template (str): The Home Assistant template to render.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call.

    Returns:
        The rendered template, converted to a native Python type when the
        result parses as a Python literal, otherwise the raw string.

    Examples:
        >>> self.render_template("{{ states('sun.sun') }}")
        'above_horizon'
        >>> self.render_template("{{ states('sensor.outside_temp') }}")
        97.2
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    service_args = dict(kwargs, namespace=namespace, template=template)
    rendered = await self.call_service("template/render", **service_args)

    # Coerce "97.2" -> 97.2, "True" -> True, etc.; fall back to the raw
    # string when the result is not a Python literal.
    try:
        return literal_eval(rendered)
    except (SyntaxError, ValueError):
        return rendered
<|code_end|>
|
TypeError: a bytes-like object is required, not 'str'
When I build and run (`docker build -t appdaemon:dev . && docker run appdaemon:dev`) the `dev` branch, the app exits with errors.
```
2021-02-21 10:25:51.952206 INFO AppDaemon: AppDaemon Version 4.0.6 starting
2021-02-21 10:25:51.953718 INFO AppDaemon: Python version is 3.8.8
2021-02-21 10:25:51.953840 INFO AppDaemon: Configuration read from: /conf/appdaemon.yaml
2021-02-21 10:25:51.954030 INFO AppDaemon: Added log: AppDaemon
2021-02-21 10:25:51.954185 INFO AppDaemon: Added log: Error
2021-02-21 10:25:51.954361 INFO AppDaemon: Added log: Access
2021-02-21 10:25:51.954522 INFO AppDaemon: Added log: Diag
2021-02-21 10:25:52.012170 INFO AppDaemon: Loading Plugin HASS using class HassPlugin from module hassplugin
2021-02-21 10:25:52.163496 INFO HASS: HASS Plugin Initializing
2021-02-21 10:25:52.187159 INFO HASS: HASS Plugin initialization complete
2021-02-21 10:25:52.187854 INFO AppDaemon: Initializing HTTP
2021-02-21 10:25:52.188573 INFO AppDaemon: Using 'ws' for event stream
2021-02-21 10:25:52.194850 WARNING AppDaemon: ------------------------------------------------------------
2021-02-21 10:25:52.195216 WARNING AppDaemon: Unexpected error in HTTP module
2021-02-21 10:25:52.195461 WARNING AppDaemon: ------------------------------------------------------------
2021-02-21 10:25:52.210179 WARNING AppDaemon: Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/appdaemon/http.py", line 171, in __init__
net = url.netloc.split(":")
TypeError: a bytes-like object is required, not 'str'
...
...
...
2021-02-21 10:25:52.454334 WARNING AppDaemon: ------------------------------------------------------------
2021-02-21 10:25:52.455067 INFO AppDaemon: Previous message repeated 1 times
2021-02-21 10:25:52.455230 WARNING AppDaemon: Unexpected error during process_event()
2021-02-21 10:25:52.455544 WARNING AppDaemon: ------------------------------------------------------------
2021-02-21 10:25:52.455861 WARNING AppDaemon: Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/appdaemon/events.py", line 235, in process_event
await self.AD.http.stream_update(namespace, mydata)
File "/usr/local/lib/python3.8/site-packages/appdaemon/http.py", line 699, in stream_update
self.AD.thread_async.call_async_no_wait(self.stream.process_event, data)
AttributeError: 'HTTP' object has no attribute 'stream'
2021-02-21 10:25:52.456080 WARNING AppDaemon: ------------------------------------------------------------
2021-02-21 10:25:52.462482 WARNING AppDaemon: ------------------------------------------------------------
2021-02-21 10:25:52.462707 WARNING AppDaemon: Unexpected error during run()
2021-02-21 10:25:52.462910 WARNING AppDaemon: ------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/appdaemon/__main__.py", line 149, in run
loop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/usr/local/lib/python3.8/site-packages/appdaemon/admin_loop.py", line 18, in loop
if self.AD.http.stats_update != "none" and self.AD.sched is not None:
AttributeError: 'HTTP' object has no attribute 'stats_update'
2021-02-21 10:25:52.467563 INFO AppDaemon: Previous message repeated 1 times
2021-02-21 10:25:52.468898 INFO AppDaemon: AppDaemon Exited
```
| appdaemon/http.py
<|code_start|>import asyncio
import json
import os
import re
import time
import traceback
import concurrent.futures
from urllib.parse import urlparse
import feedparser
from aiohttp import web
import ssl
import bcrypt
import uuid
from jinja2 import Environment, FileSystemLoader, select_autoescape
import appdaemon.dashboard as addashboard
import appdaemon.utils as utils
import appdaemon.stream.adstream as stream
import appdaemon.admin as adadmin
from appdaemon.appdaemon import AppDaemon
def securedata(myfunc):
    """Decorator protecting stream and service-call handlers.

    Lets the request through when no password is configured, when the
    bcrypt "adcreds" cookie validates, or when a matching "x-ad-access"
    header or "api_password" query parameter is supplied; otherwise
    answers 401.
    """

    async def wrapper(*args):
        self, request = args[0], args[1]

        # No password configured: everything is allowed.
        if self.password is None:
            return await myfunc(*args)

        creds = request.cookies.get("adcreds")
        if creds is not None:
            ok = await utils.run_in_executor(
                self, bcrypt.checkpw, str.encode(self.password), str.encode(creds),
            )
            # NOTE(review): a present-but-invalid cookie yields None (no
            # response object), matching the original behavior — confirm
            # whether a 401 was intended here.
            return await myfunc(*args) if ok else None

        if request.headers.get("x-ad-access") == self.password:
            return await myfunc(*args)

        if request.query.get("api_password") == self.password:
            return await myfunc(*args)

        return self.get_response(request, "401", "Unauthorized")

    return wrapper
def secure(myfunc):
    """Decorator for screen-based (browser) security.

    Passes the request through when no password is configured or the
    bcrypt "adcreds" cookie validates; otherwise redirects to the logon
    page.
    """

    async def wrapper(*args):
        self, request = args[0], args[1]

        # No password configured: everything is allowed.
        if self.password is None:
            return await myfunc(*args)

        creds = request.cookies.get("adcreds")
        if creds is not None:
            ok = await utils.run_in_executor(
                self, bcrypt.checkpw, str.encode(self.password), str.encode(creds),
            )
            if ok:
                return await myfunc(*args)

        # Missing or invalid credentials: show the logon page.
        return await self.forcelogon(request)

    return wrapper
def route_secure(myfunc):
    """Decorator protecting app-registered web routes.

    Open when no password or no valid tokens are configured; otherwise
    accepts a valid "adcreds" cookie or a recognized "token" query
    parameter, and answers 401 for anything else.
    """

    async def wrapper(*args):
        self, request = args[0], args[1]

        # Unprotected when no password is set or no tokens are configured.
        if self.password is None or self.valid_tokens == []:
            return await myfunc(*args)

        creds = request.cookies.get("adcreds")
        if creds is not None:
            ok = await utils.run_in_executor(
                self, bcrypt.checkpw, str.encode(self.password), str.encode(creds)
            )
            # NOTE(review): an invalid cookie yields None rather than 401,
            # matching the original behavior.
            return await myfunc(*args) if ok else None

        token = request.query.get("token")
        if token is not None and token in self.valid_tokens:
            return await myfunc(*args)

        return self.get_response(request, "401", "Unauthorized")

    return wrapper
class HTTP:
def __init__(self, ad: AppDaemon, loop, logging, appdaemon, dashboard, admin, api, http):
    """Configure and start the HTTP subsystem.

    Reads the ``http`` config section, parses the bind address from its
    ``url``, creates the aiohttp application plus the event stream, and
    conditionally wires up the API, admin interface and dashboards.

    Args:
        ad: Top level AppDaemon object.
        loop: asyncio event loop used to schedule background tasks.
        logging: AppDaemon logging subsystem.
        appdaemon: ``appdaemon`` configuration section.
        dashboard: Dashboard configuration section, or None if disabled.
        admin: Admin interface configuration section, or None if disabled.
        api: API configuration section, or None if disabled.
        http: ``http`` configuration section; must contain ``url``.

    Raises:
        ValueError: If ``url`` is missing/empty in the http configuration.
    """
    self.AD = ad
    self.logging = logging
    self.logger = ad.logging.get_child("_http")
    self.access = ad.logging.get_access()

    self.appdaemon = appdaemon
    self.dashboard = dashboard
    self.dashboard_dir = None
    self.admin = admin
    self.http = http
    self.api = api
    self.runner = None

    self.template_dir = os.path.join(os.path.dirname(__file__), "assets", "templates")

    self.password = None
    self._process_arg("password", http)

    self.valid_tokens = []
    self._process_arg("tokens", http)

    self.url = None
    self._process_arg("url", http)

    # Fail fast with an actionable message. Previously a missing 'url'
    # reached urlparse(None), whose bytes coercion made
    # `url.netloc.split(":")` die with the cryptic
    # "TypeError: a bytes-like object is required, not 'str'"; the
    # swallowed exception then left attributes such as self.stream and
    # self.stats_update unset, causing cascading AttributeErrors later.
    if not self.url:
        raise ValueError(
            "'url' is a required parameter in the 'http' configuration section, e.g. url: http://127.0.0.1:5050"
        )

    self.work_factor = 12
    self._process_arg("work_factor", http)

    self.ssl_certificate = None
    self._process_arg("ssl_certificate", http)

    self.ssl_key = None
    self._process_arg("ssl_key", http)

    self.transport = "ws"
    self._process_arg("transport", http)
    self.logger.info("Using '%s' for event stream", self.transport)

    self.config_dir = None
    self._process_arg("config_dir", dashboard)

    self.static_dirs = {}
    self._process_arg("static_dirs", http)

    self.stopping = False

    self.endpoints = {}
    self.app_routes = {}

    self.dashboard_obj = None
    self.admin_obj = None

    self.install_dir = os.path.dirname(__file__)

    self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
    self.template_dir = os.path.join(self.install_dir, "assets", "templates")
    self.css_dir = os.path.join(self.install_dir, "assets", "css")
    self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
    self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
    self.images_dir = os.path.join(self.install_dir, "assets", "images")

    try:
        url = urlparse(self.url)

        net = url.netloc.split(":")
        self.host = net[0]
        try:
            self.port = net[1]
        except IndexError:
            # No explicit port in the url: default to 80.
            self.port = 80

        if self.host == "":
            raise ValueError("Invalid host for 'url'")

        self.app = web.Application()

        if "headers" in self.http:
            self.app.on_response_prepare.append(self.add_response_headers)

        # Setup event stream
        self.stream = stream.ADStream(self.AD, self.app, self.transport)

        self.loop = loop
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)

        if self.ssl_certificate is not None and self.ssl_key is not None:
            self.context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            self.context.load_cert_chain(self.ssl_certificate, self.ssl_key)
        else:
            self.context = None

        self.setup_http_routes()

        #
        # API
        #
        if api is not None:
            self.logger.info("Starting API")
            self.setup_api_routes()
        else:
            self.logger.info("API is disabled")

        #
        # Admin
        #
        if admin is not None:
            self.logger.info("Starting Admin Interface")

            self.stats_update = "realtime"
            self._process_arg("stats_update", admin)

            self.admin_obj = adadmin.Admin(
                self.config_dir,
                logging,
                self.AD,
                javascript_dir=self.javascript_dir,
                template_dir=self.template_dir,
                css_dir=self.css_dir,
                fonts_dir=self.fonts_dir,
                webfonts_dir=self.webfonts_dir,
                images_dir=self.images_dir,
                transport=self.transport,
                **admin
            )
        else:
            self.logger.info("Admin Interface is disabled")

        #
        # Dashboards
        #
        if dashboard is not None:
            self.logger.info("Starting Dashboards")

            self._process_arg("dashboard_dir", dashboard)

            self.compile_on_start = True
            self._process_arg("compile_on_start", dashboard)

            self.force_compile = False
            self._process_arg("force_compile", dashboard)

            self.profile_dashboard = False
            self._process_arg("profile_dashboard", dashboard)

            self.rss_feeds = None
            self._process_arg("rss_feeds", dashboard)

            self.fa4compatibility = False
            self._process_arg("fa4compatibility", dashboard)

            if "rss_feeds" in dashboard:
                self.rss_feeds = []
                for feed in dashboard["rss_feeds"]:
                    # A valid target looks like "entity.name" (exactly one dot).
                    if feed["target"].count(".") != 1:
                        self.logger.warning("Invalid RSS feed target: %s", feed["target"])
                    else:
                        self.rss_feeds.append(feed)

            self.rss_update = None
            self._process_arg("rss_update", dashboard)

            self.rss_last_update = None

            # find dashboard dir
            if self.dashboard_dir is None:
                if self.config_dir is None:
                    self.dashboard_dir = utils.find_path("dashboards")
                else:
                    self.dashboard_dir = os.path.join(self.config_dir, "dashboards")

            self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
            self.template_dir = os.path.join(self.install_dir, "assets", "templates")
            self.css_dir = os.path.join(self.install_dir, "assets", "css")
            self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
            self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
            self.images_dir = os.path.join(self.install_dir, "assets", "images")

            #
            # Setup compile directories
            #
            if self.config_dir is None:
                self.compile_dir = utils.find_path("compiled")
            else:
                self.compile_dir = os.path.join(self.config_dir, "compiled")

            self.dashboard_obj = addashboard.Dashboard(
                self.config_dir,
                self.logging,
                dash_compile_on_start=self.compile_on_start,
                dash_force_compile=self.force_compile,
                profile_dashboard=self.profile_dashboard,
                dashboard_dir=self.dashboard_dir,
                fa4compatibility=self.fa4compatibility,
                transport=self.transport,
                javascript_dir=self.javascript_dir,
                template_dir=self.template_dir,
                css_dir=self.css_dir,
                fonts_dir=self.fonts_dir,
                webfonts_dir=self.webfonts_dir,
                images_dir=self.images_dir,
            )
            self.setup_dashboard_routes()

        else:
            self.logger.info("Dashboards Disabled")

        #
        # Finish up and start the server
        #
        # handler = self.app.make_handler()
        # f = loop.create_server(handler, "0.0.0.0", int(self.port), ssl=context)
        # loop.create_task(f)

        if self.dashboard_obj is not None:
            loop.create_task(self.update_rss())

    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in HTTP module")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
async def start_server(self):
    # Bind the aiohttp application to all interfaces on the configured port.
    self.logger.info("Running on port %s", self.port)
    self.runner = web.AppRunner(self.app)
    await self.runner.setup()
    site = web.TCPSite(self.runner, "0.0.0.0", int(self.port), ssl_context=self.context)
    await site.start()
async def stop_server(self):
    # Log shutdown; runner.cleanup() is deliberately skipped because it
    # makes AppDaemon hang on exit (see comment below).
    self.logger.info("Shutting down webserver")
    #
    # We should do this but it makes AD hang so ...
    #
    # await self.runner.cleanup()
async def add_response_headers(self, request, response):
    # aiohttp on_response_prepare hook: inject user-configured headers
    # from the http config section into every response.
    for header, value in self.http["headers"].items():
        response.headers[header] = value

def stop(self):
    # Signal background loops (e.g. update_rss) to exit.
    self.stopping = True

def _process_arg(self, arg, kwargs):
    # Copy config value `arg` onto self if present in the config dict.
    if kwargs:
        if arg in kwargs:
            setattr(self, arg, kwargs[arg])
@staticmethod
def check_password(password, hash):
    """Return True if *password* matches the bcrypt *hash*.

    Bug fix: the previous version returned the tuple
    ``(bcrypt.checkpw, encoded_password, encoded_hash)`` instead of
    actually calling ``bcrypt.checkpw``, so the result was always truthy.
    """
    return bcrypt.checkpw(str.encode(password), str.encode(hash))
async def forcelogon(self, request):
    # Present the logon page for any unauthenticated screen request.
    response = await self.logon_page(request)
    return response

async def logon_response(self, request):
    # Handle the POST from the logon form: on success set a bcrypt-hashed
    # "adcreds" cookie (valid one year) and show the admin page or the
    # dashboard list; on failure re-present the logon page.
    try:
        data = await request.post()
        password = data["password"]

        if password == self.password:
            self.access.info("Successful logon from %s", request.host)
            hashed = bcrypt.hashpw(str.encode(self.password), bcrypt.gensalt(self.work_factor))

            if self.admin is not None:
                response = await self._admin_page(request)
            else:
                response = await self._list_dash(request)

            self.logger.debug("hashed=%s", hashed)
            # Set cookie to last for 1 year
            response.set_cookie("adcreds", hashed.decode("utf-8"), max_age=31536000)

        else:
            self.access.warning("Unsuccessful logon from %s", request.host)
            response = await self.logon_page(request)

        return response
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in logon_response()")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Server error in logon_response()")
# noinspection PyUnusedLocal
@secure
async def list_dash(self, request):
    # Authenticated wrapper around _list_dash().
    return await self._list_dash(request)

async def _list_dash(self, request):
    # Render the dashboard index page (no auth check; used post-logon too).
    response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard_list)
    return web.Response(text=response, content_type="text/html")

@secure
async def load_dash(self, request):
    # Serve a single dashboard; "?recompile=1" forces recompilation.
    name = request.match_info.get("name", "Anonymous")
    params = request.query
    skin = params.get("skin", "default")
    recompile = params.get("recompile", False)
    if recompile == "1":
        recompile = True
    response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard, name, skin, recompile)
    return web.Response(text=response, content_type="text/html")
async def update_rss(self):
    # Background task: poll the configured RSS feeds every `rss_update`
    # seconds and publish each parsed feed into the "admin" namespace.
    # Exits when self.stopping is set via stop().
    if self.rss_feeds is not None and self.rss_update is not None:
        while not self.stopping:
            try:
                if self.rss_last_update is None or (self.rss_last_update + self.rss_update) <= time.time():
                    self.rss_last_update = time.time()

                    for feed_data in self.rss_feeds:
                        feed = await utils.run_in_executor(self, feedparser.parse, feed_data["feed"])

                        if "bozo_exception" in feed:
                            # feedparser flags malformed feeds via bozo_exception.
                            self.logger.warning(
                                "Error in RSS feed %s: %s", feed_data["feed"], feed["bozo_exception"],
                            )
                        else:
                            new_state = {"feed": feed}

                            # RSS Feeds always live in the admin namespace
                            await self.AD.state.set_state("rss", "admin", feed_data["target"], state=new_state)

                await asyncio.sleep(1)
            except Exception:
                self.logger.warning("-" * 60)
                self.logger.warning("Unexpected error in update_rss()")
                self.logger.warning("-" * 60)
                self.logger.warning(traceback.format_exc())
                self.logger.warning("-" * 60)
#
# REST API
#

@securedata
async def get_ad(self, request):
    # Liveness endpoint: reports that AppDaemon is up.
    return web.json_response({"state": {"status": "active"}}, dumps=utils.convert_json)

@securedata
async def get_entity(self, request):
    # GET /api/appdaemon/state/{namespace}/{entity}: state of one entity.
    namespace = None
    entity_id = None
    try:
        entity_id = request.match_info.get("entity")
        namespace = request.match_info.get("namespace")

        self.logger.debug("get_state() called, ns=%s, entity=%s", namespace, entity_id)

        state = self.AD.state.get_entity(namespace, entity_id)

        self.logger.debug("result = %s", state)

        return web.json_response({"state": state}, dumps=utils.convert_json)
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in get_entity()")
        self.logger.warning("Namespace: %s, entity: %s", namespace, entity_id)
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Unexpected error in get_entity()")

@securedata
async def get_namespace(self, request):
    # GET /api/appdaemon/state/{namespace}: all state in one namespace.
    namespace = None
    try:
        namespace = request.match_info.get("namespace")

        self.logger.debug("get_namespace() called, ns=%s", namespace)

        state = self.AD.state.get_entity(namespace)

        self.logger.debug("result = %s", state)

        if state is None:
            return self.get_response(request, 404, "Namespace Not Found")

        return web.json_response({"state": state}, dumps=utils.convert_json)
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in get_namespace()")
        self.logger.warning("Namespace: %s", namespace)
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Unexpected error in get_namespace()")

@securedata
async def get_namespace_entities(self, request):
    # GET /api/appdaemon/state/{namespace}/: entity ids in one namespace.
    namespace = None
    try:
        namespace = request.match_info.get("namespace")

        self.logger.debug("get_namespace_entities() called, ns=%s", namespace)

        state = self.AD.state.list_namespace_entities(namespace)

        self.logger.debug("result = %s", state)

        if state is None:
            return self.get_response(request, 404, "Namespace Not Found")

        return web.json_response({"state": state}, dumps=utils.convert_json)
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in get_namespace_entities()")
        self.logger.warning("Namespace: %s", namespace)
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Unexpected error in get_namespace_entities()")

@securedata
async def get_namespaces(self, request):
    # GET /api/appdaemon/state/: list of known namespaces.
    try:
        self.logger.debug("get_namespaces() called)")

        state = await self.AD.state.list_namespaces()

        self.logger.debug("result = %s", state)

        return web.json_response({"state": state}, dumps=utils.convert_json)
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in get_namespaces()")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Unexpected error in get_namespaces()")

@securedata
async def get_services(self, request):
    # GET /api/appdaemon/service/: list of registered services.
    try:
        self.logger.debug("get_services() called)")

        state = self.AD.services.list_services()

        self.logger.debug("result = %s", state)

        return web.json_response({"state": state}, dumps=utils.convert_json)
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in get_services()")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Unexpected error in get_services()")
@securedata
async def get_state(self, request):
    """Return the state of all entities across every namespace.

    Responds 404 when the state store returns None, 500 on unexpected
    errors, otherwise a JSON body ``{"state": ...}``.
    """
    try:
        self.logger.debug("get_state() called")
        state = self.AD.state.get_entity()

        if state is None:
            # Fix: previously the 404 response was built but never
            # returned, so the handler fell through and serialized None.
            return self.get_response(request, 404, "State Not Found")

        self.logger.debug("result = %s", state)

        return web.json_response({"state": state}, dumps=utils.convert_json)
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in get_state()")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Unexpected error in get_state()")
@securedata
async def get_logs(self, request):
    # GET /api/appdaemon/logs: return the admin log buffers as JSON.
    try:
        self.logger.debug("get_logs() called")

        logs = await utils.run_in_executor(self, self.AD.logging.get_admin_logs)

        return web.json_response({"logs": logs}, dumps=utils.convert_json)
    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in get_logs()")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return self.get_response(request, 500, "Unexpected error in get_logs()")
# noinspection PyUnusedLocal
@securedata
async def call_service(self, request):
    # POST /api/appdaemon/service/{namespace}/{domain}/{service}:
    # forward a JSON payload to the service registry, after normalizing
    # a few dashboard-specific value formats.
    try:
        try:
            data = await request.json()
        except json.decoder.JSONDecodeError:
            return self.get_response(request, 400, "JSON Decode Error")

        args = {}
        namespace = request.match_info.get("namespace")
        domain = request.match_info.get("domain")
        service = request.match_info.get("service")
        #
        # Some value munging for dashboard
        #
        for key in data:
            if key == "service":
                pass
            elif key == "rgb_color":
                # "r, g, b" string -> [r, g, b] list (string components).
                m = re.search(r"\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)", data[key])
                if m:
                    r = m.group(1)
                    g = m.group(2)
                    b = m.group(3)
                    args["rgb_color"] = [r, g, b]
            elif key == "xy_color":
                m = re.search(r"\s*(\d+\.\d+)\s*,\s*(\d+\.\d+)", data[key])
                if m:
                    x = m.group(1)
                    y = m.group(2)
                    args["xy_color"] = [x, y]
            elif key == "json_args":
                # Inline JSON blob: merge its keys into the service args.
                json_args = json.loads(data[key])
                for k in json_args.keys():
                    args[k] = json_args[k]
            else:
                args[key] = data[key]

        self.logger.debug("call_service() args = %s", args)

        await self.AD.services.call_service(namespace, domain, service, args)
        return web.Response(status=200)

    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in call_service()")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return web.Response(status=500)

@securedata
async def fire_event(self, request):
    # POST /api/appdaemon/event/{namespace}/{event}: fire an event with
    # the JSON payload (minus the "event" key) as its arguments.
    try:
        try:
            data = await request.json()
        except json.decoder.JSONDecodeError:
            return self.get_response(request, 400, "JSON Decode Error")

        args = {}
        namespace = request.match_info.get("namespace")
        event = request.match_info.get("event")
        #
        # Some value munging for dashboard
        #
        for key in data:
            if key == "event":
                pass
            else:
                args[key] = data[key]

        self.logger.debug("fire_event() args = %s", args)

        await self.AD.events.fire_event(namespace, event, **args)

        return web.Response(status=200)

    except Exception:
        self.logger.warning("-" * 60)
        self.logger.warning("Unexpected error in fire_event()")
        self.logger.warning("-" * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning("-" * 60)
        return web.Response(status=500)

# noinspection PyUnusedLocal
async def not_found(self, request):
    # Canned 404 for routes that deliberately resolve nowhere.
    return self.get_response(request, 404, "Not Found")

# Stream Handling
async def stream_update(self, namespace, data):
    # Push an event to all connected stream clients.
    # NOTE(review): self.stream is only created if __init__ completed its
    # try block; if HTTP setup failed earlier this raises AttributeError —
    # confirm callers guard against a partially initialized HTTP object.
    # self.logger.debug("stream_update() %s:%s", namespace, data)
    data["namespace"] = namespace
    self.AD.thread_async.call_async_no_wait(self.stream.process_event, data)
# Routes, Status and Templates

def setup_api_routes(self):
    # REST API endpoints (only registered when the api config is present).
    self.app.router.add_post("/api/appdaemon/service/{namespace}/{domain}/{service}", self.call_service)
    self.app.router.add_post("/api/appdaemon/event/{namespace}/{event}", self.fire_event)
    self.app.router.add_get("/api/appdaemon/service/", self.get_services)
    self.app.router.add_get("/api/appdaemon/state/{namespace}/{entity}", self.get_entity)
    self.app.router.add_get("/api/appdaemon/state/{namespace}", self.get_namespace)
    self.app.router.add_get("/api/appdaemon/state/{namespace}/", self.get_namespace_entities)
    self.app.router.add_get("/api/appdaemon/state/", self.get_namespaces)
    self.app.router.add_get("/api/appdaemon/state", self.get_state)
    self.app.router.add_get("/api/appdaemon/logs", self.get_logs)
    self.app.router.add_post("/api/appdaemon/{app}", self.call_api)
    self.app.router.add_get("/api/appdaemon", self.get_ad)

def setup_http_routes(self):
    # Core routes shared by all features: auth, static assets and the
    # landing page (admin > dashboards > error, by availability).
    self.app.router.add_get("/favicon.ico", self.not_found)
    self.app.router.add_get("/{gfx}.png", self.not_found)
    self.app.router.add_post("/logon_response", self.logon_response)

    # Add static path for JavaScript
    self.app.router.add_static("/javascript", self.javascript_dir)

    # Add static path for fonts
    self.app.router.add_static("/fonts", self.fonts_dir)

    # Add static path for webfonts
    self.app.router.add_static("/webfonts", self.webfonts_dir)

    # Add static path for images
    self.app.router.add_static("/images", self.images_dir)

    # Add static path for css
    self.app.router.add_static("/css", self.css_dir)

    if self.admin is not None:
        self.app.router.add_get("/", self.admin_page)
    elif self.dashboard is not None:
        self.app.router.add_get("/", self.list_dash)
    else:
        self.app.router.add_get("/", self.error_page)

    #
    # For App based Web Server
    #
    self.app.router.add_get("/app/{route}", self.app_webserver)

    #
    # Add static path for apps
    #
    apps_static = os.path.join(self.AD.config_dir, "www")
    exists = True
    if not os.path.isdir(apps_static):  # check if the folder exists
        try:
            os.mkdir(apps_static)
        except OSError:
            self.logger.warning("Creation of the Web directory %s failed", apps_static)
            exists = False
        else:
            self.logger.debug("Successfully created the Web directory %s ", apps_static)

    if exists:
        self.app.router.add_static("/local", apps_static)

    #
    # Setup user defined static paths
    #
    for name, static_dir in self.static_dirs.items():
        if not os.path.isdir(static_dir):  # check if the folder exists
            self.logger.warning("The Web directory %s doesn't exist. So static route not set up", static_dir)
        else:
            self.app.router.add_static("/{}".format(name), static_dir)
            self.logger.debug("Successfully created the Web directory %s ", static_dir)

def setup_dashboard_routes(self):
    # Dashboard pages plus their compiled asset directories.
    self.app.router.add_get("/list", self.list_dash)
    self.app.router.add_get("/{name}", self.load_dash)

    # Setup Templates
    self.app.router.add_static("/compiled_javascript", self.dashboard_obj.compiled_javascript_dir)
    self.app.router.add_static("/compiled_css", self.dashboard_obj.compiled_css_dir)

    # Add path for custom_css if it exists
    custom_css = os.path.join(self.dashboard_obj.config_dir, "custom_css")
    if os.path.isdir(custom_css):
        self.app.router.add_static("/custom_css", custom_css)
# API

async def terminate_app(self, name):
    # Drop all endpoints and app routes registered by app `name`.
    if name in self.endpoints:
        del self.endpoints[name]

    if name in self.app_routes:
        del self.app_routes[name]

def get_response(self, request, code, error):
    # Build a minimal HTML status body for API calls and log the outcome.
    res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in API Call</body></html>".format(
        code, error, code, error
    )
    app = request.match_info.get("app", "system")
    if code == 200:
        self.access.info("API Call to %s: status: %s", app, code)
    else:
        self.access.warning("API Call to %s: status: %s, %s", app, code, error)
    return web.Response(body=res, status=code)

def get_web_response(self, request, code, error):
    # Same idea as get_response() but for app web-server calls.
    # NOTE(review): app_webserver passes `code` as a string, so the
    # integer comparison `code == 200` never matches there — confirm
    # whether the info-level branch is intended to be reachable.
    res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in Web Service Call</body></html>".format(
        code, error, code, error
    )
    app = request.match_info.get("app", "system")
    if code == 200:
        self.access.info("Web Call to %s: status: %s", app, code)
    else:
        self.access.warning("Web Call to %s: status: %s, %s", app, code, error)
    return web.Response(text=res, content_type="text/html")
@securedata
async def call_api(self, request):
    # POST /api/appdaemon/{app}: dispatch a JSON payload to an app's
    # registered API endpoint and relay its (result, code).
    code = 200
    ret = ""

    app = request.match_info.get("app")

    try:
        args = await request.json()
    except json.decoder.JSONDecodeError:
        return self.get_response(request, 400, "JSON Decode Error")

    try:
        ret, code = await self.dispatch_app_by_name(app, args)
    except Exception:
        self.logger.error("-" * 60)
        self.logger.error("Unexpected error during API call")
        self.logger.error("-" * 60)
        self.logger.error(traceback.format_exc())
        self.logger.error("-" * 60)

    if code == 404:
        return self.get_response(request, 404, "App Not Found")

    response = "OK"
    self.access.info("API Call to %s: status: %s %s", app, code, response)

    return web.json_response(ret, status=code, dumps=utils.convert_json)

# Routes, Status and Templates

async def register_endpoint(self, cb, name):
    # Register callback `cb` under endpoint `name`; returns an opaque handle.
    handle = uuid.uuid4().hex

    if name not in self.endpoints:
        self.endpoints[name] = {}
    self.endpoints[name][handle] = {"callback": cb, "name": name}

    return handle

async def unregister_endpoint(self, handle, name):
    # Remove a previously registered endpoint; unknown handles are ignored.
    if name in self.endpoints and handle in self.endpoints[name]:
        del self.endpoints[name][handle]

async def dispatch_app_by_name(self, name, args):
    # Find the callback registered under `name` and invoke it — directly
    # if it is a coroutine function, otherwise in an executor thread.
    # Note: the scan does not break on the first hit, so when several
    # handles share a name the last one iterated wins.
    callback = None
    for app in self.endpoints:
        for handle in self.endpoints[app]:
            if self.endpoints[app][handle]["name"] == name:
                callback = self.endpoints[app][handle]["callback"]
    if callback is not None:
        if asyncio.iscoroutinefunction(callback):
            return await callback(args)
        else:
            return await utils.run_in_executor(self, callback, args)
    else:
        return "", 404
#
# App based Web Server
#

async def register_route(self, cb, route, name, **kwargs):
    # Register an async callback as a web route for app `name`.
    # Returns an opaque handle, or None if `cb` is not a coroutine function.
    if not asyncio.iscoroutinefunction(cb):  # must be async function
        self.logger.warning(
            "Could not Register Callback for %s, using Route %s as Web Server Route. Callback must be Async",
            name,
            route,
        )
        return

    handle = uuid.uuid4().hex

    if name not in self.app_routes:
        self.app_routes[name] = {}

    # Optional per-route access token.
    token = kwargs.get("token")

    self.app_routes[name][handle] = {"callback": cb, "route": route, "token": token}

    return handle

async def unregister_route(self, handle, name):
    # Remove a previously registered app route; unknown handles are ignored.
    if name in self.app_routes and handle in self.app_routes[name]:
        del self.app_routes[name][handle]

@route_secure
async def app_webserver(self, request):
    # GET /app/{route}: find the app route matching `route`, enforce its
    # per-route token (if any), then await its callback as a tracked
    # future so it can be cancelled along with the app.
    name = None
    route = request.match_info.get("route")
    token = request.query.get("token")
    code = 404
    error = "Requested Server does not exist"
    callback = None

    # NOTE(review): the outer loop advances `name` once more before the
    # callback check breaks it, so with multiple registered apps the
    # access log below can attribute the call to the wrong app — confirm.
    for name in self.app_routes:
        if callback is not None:  # a callback has been collected
            break

        for handle in self.app_routes[name]:
            app_route = self.app_routes[name][handle]["route"]
            app_token = self.app_routes[name][handle]["token"]

            if app_route == route:
                if app_token is not None and app_token != token:
                    return self.get_web_response(request, "401", "Unauthorized")

                callback = self.app_routes[name][handle]["callback"]
                break

    if callback is not None:
        self.access.debug("Web Call to %s for %s", route, name)

        try:
            f = asyncio.ensure_future(callback(request))
            self.AD.futures.add_future(name, f)
            return await f

        except asyncio.CancelledError:
            code = 503
            error = "Request was Cancelled"

        except Exception:
            self.logger.error("-" * 60)
            self.logger.error("Unexpected error during Web call")
            self.logger.error("-" * 60)
            self.logger.error(traceback.format_exc())
            self.logger.error("-" * 60)
            code = 503
            error = "Request had an Error"

    return self.get_web_response(request, str(code), error)
#
# Admin
#

@secure
async def admin_page(self, request):
    # Authenticated admin UI entry point.
    return await self._admin_page(request)

# Insecure version
async def _admin_page(self, request):
    # Render the admin page without an auth check (used post-logon).
    response = await self.admin_obj.admin_page(request.scheme, request.host)
    return web.Response(text=response, content_type="text/html")

async def logon_page(self, request):
    # Render the logon form (template work runs in an executor thread).
    response = await utils.run_in_executor(self, self.generate_logon_page, request.scheme, request.host)
    return web.Response(text=response, content_type="text/html")

async def error_page(self, request):
    # Fallback landing page when neither admin nor dashboards are enabled.
    response = await utils.run_in_executor(self, self.generate_error_page, request.scheme, request.host)
    return web.Response(text=response, content_type="text/html")
def generate_logon_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]),
)
template = env.get_template("logon.jinja2")
rendered_template = template.render(params)
return rendered_template
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
def generate_error_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]),
)
template = env.get_template("error.jinja2")
rendered_template = template.render(params)
return rendered_template
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
<|code_end|>
| appdaemon/http.py
<|code_start|>import asyncio
import json
import os
import re
import time
import traceback
import concurrent.futures
from urllib.parse import urlparse
import feedparser
from aiohttp import web
import ssl
import bcrypt
import uuid
from jinja2 import Environment, FileSystemLoader, select_autoescape
import appdaemon.dashboard as addashboard
import appdaemon.utils as utils
import appdaemon.stream.adstream as stream
import appdaemon.admin as adadmin
from appdaemon.appdaemon import AppDaemon
def securedata(myfunc):
"""
Take care of streams and service calls
"""
async def wrapper(*args):
self = args[0]
request = args[1]
if self.password is None:
return await myfunc(*args)
elif "adcreds" in request.cookies:
match = await utils.run_in_executor(
self, bcrypt.checkpw, str.encode(self.password), str.encode(request.cookies["adcreds"]),
)
if match:
return await myfunc(*args)
elif ("x-ad-access" in request.headers) and (request.headers["x-ad-access"] == self.password):
return await myfunc(*args)
elif "api_password" in request.query and request.query["api_password"] == self.password:
return await myfunc(*args)
else:
return self.get_response(request, "401", "Unauthorized")
return wrapper
def secure(myfunc):
"""
Take care of screen based security
"""
async def wrapper(*args):
self = args[0]
request = args[1]
if self.password is None:
return await myfunc(*args)
else:
if "adcreds" in request.cookies:
match = await utils.run_in_executor(
self, bcrypt.checkpw, str.encode(self.password), str.encode(request.cookies["adcreds"]),
)
if match:
return await myfunc(*args)
else:
return await self.forcelogon(request)
else:
return await self.forcelogon(request)
return wrapper
def route_secure(myfunc):
"""
Take care of streams and service calls
"""
async def wrapper(*args):
self = args[0]
request = args[1]
if self.password is None or self.valid_tokens == []:
return await myfunc(*args)
elif "adcreds" in request.cookies:
match = await utils.run_in_executor(
self, bcrypt.checkpw, str.encode(self.password), str.encode(request.cookies["adcreds"])
)
if match:
return await myfunc(*args)
elif "token" in request.query and request.query["token"] in self.valid_tokens:
return await myfunc(*args)
else:
return self.get_response(request, "401", "Unauthorized")
return wrapper
class HTTP:
def __init__(self, ad: AppDaemon, loop, logging, appdaemon, dashboard, admin, api, http):
self.AD = ad
self.logging = logging
self.logger = ad.logging.get_child("_http")
self.access = ad.logging.get_access()
self.appdaemon = appdaemon
self.dashboard = dashboard
self.dashboard_dir = None
self.admin = admin
self.http = http
self.api = api
self.runner = None
self.template_dir = os.path.join(os.path.dirname(__file__), "assets", "templates")
self.password = None
self.valid_tokens = []
self.url = None
self.work_factor = 12
self.ssl_certificate = None
self.ssl_key = None
self.transport = "ws"
self.config_dir = None
self._process_arg("config_dir", dashboard)
self.static_dirs = {}
self._process_http(http)
self.stopping = False
self.endpoints = {}
self.app_routes = {}
self.dashboard_obj = None
self.admin_obj = None
self.install_dir = os.path.dirname(__file__)
self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
self.template_dir = os.path.join(self.install_dir, "assets", "templates")
self.css_dir = os.path.join(self.install_dir, "assets", "css")
self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
self.images_dir = os.path.join(self.install_dir, "assets", "images")
try:
url = urlparse(self.url)
net = url.netloc.split(":")
self.host = net[0]
try:
self.port = net[1]
except IndexError:
self.port = 80
if self.host == "":
raise ValueError("Invalid host for 'url'")
self.app = web.Application()
if "headers" in self.http:
self.app.on_response_prepare.append(self.add_response_headers)
# Setup event stream
self.stream = stream.ADStream(self.AD, self.app, self.transport)
self.loop = loop
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
if self.ssl_certificate is not None and self.ssl_key is not None:
self.context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self.context.load_cert_chain(self.ssl_certificate, self.ssl_key)
else:
self.context = None
self.setup_http_routes()
#
# API
#
if api is not None:
self.logger.info("Starting API")
self.setup_api_routes()
else:
self.logger.info("API is disabled")
#
# Admin
#
if admin is not None:
self.logger.info("Starting Admin Interface")
self.stats_update = "realtime"
self._process_arg("stats_update", admin)
self.admin_obj = adadmin.Admin(
self.config_dir,
logging,
self.AD,
javascript_dir=self.javascript_dir,
template_dir=self.template_dir,
css_dir=self.css_dir,
fonts_dir=self.fonts_dir,
webfonts_dir=self.webfonts_dir,
images_dir=self.images_dir,
transport=self.transport,
**admin
)
else:
self.logger.info("Admin Interface is disabled")
#
# Dashboards
#
if dashboard is not None:
self.logger.info("Starting Dashboards")
self._process_arg("dashboard_dir", dashboard)
self.compile_on_start = True
self._process_arg("compile_on_start", dashboard)
self.force_compile = False
self._process_arg("force_compile", dashboard)
self.profile_dashboard = False
self._process_arg("profile_dashboard", dashboard)
self.rss_feeds = None
self._process_arg("rss_feeds", dashboard)
self.fa4compatibility = False
self._process_arg("fa4compatibility", dashboard)
if "rss_feeds" in dashboard:
self.rss_feeds = []
for feed in dashboard["rss_feeds"]:
if feed["target"].count(".") != 1:
self.logger.warning("Invalid RSS feed target: %s", feed["target"])
else:
self.rss_feeds.append(feed)
self.rss_update = None
self._process_arg("rss_update", dashboard)
self.rss_last_update = None
# find dashboard dir
if self.dashboard_dir is None:
if self.config_dir is None:
self.dashboard_dir = utils.find_path("dashboards")
else:
self.dashboard_dir = os.path.join(self.config_dir, "dashboards")
self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
self.template_dir = os.path.join(self.install_dir, "assets", "templates")
self.css_dir = os.path.join(self.install_dir, "assets", "css")
self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
self.images_dir = os.path.join(self.install_dir, "assets", "images")
#
# Setup compile directories
#
if self.config_dir is None:
self.compile_dir = utils.find_path("compiled")
else:
self.compile_dir = os.path.join(self.config_dir, "compiled")
self.dashboard_obj = addashboard.Dashboard(
self.config_dir,
self.logging,
dash_compile_on_start=self.compile_on_start,
dash_force_compile=self.force_compile,
profile_dashboard=self.profile_dashboard,
dashboard_dir=self.dashboard_dir,
fa4compatibility=self.fa4compatibility,
transport=self.transport,
javascript_dir=self.javascript_dir,
template_dir=self.template_dir,
css_dir=self.css_dir,
fonts_dir=self.fonts_dir,
webfonts_dir=self.webfonts_dir,
images_dir=self.images_dir,
)
self.setup_dashboard_routes()
else:
self.logger.info("Dashboards Disabled")
#
# Finish up and start the server
#
# handler = self.app.make_handler()
# f = loop.create_server(handler, "0.0.0.0", int(self.port), ssl=context)
# loop.create_task(f)
if self.dashboard_obj is not None:
loop.create_task(self.update_rss())
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in HTTP module")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
def _process_http(self, http):
self._process_arg("password", http)
self._process_arg("tokens", http)
self._process_arg("work_factor", http)
self._process_arg("ssl_certificate", http)
self._process_arg("ssl_key", http)
self._process_arg("url", http)
if not self.url:
self.logger.warning(
"'{arg}' is '{value}'. Please configure appdaemon.yaml".format(arg="url", value=self.url)
)
exit(0)
self._process_arg("transport", http)
self.logger.info("Using '%s' for event stream", self.transport)
self._process_arg("static_dirs", http)
async def start_server(self):
self.logger.info("Running on port %s", self.port)
self.runner = web.AppRunner(self.app)
await self.runner.setup()
site = web.TCPSite(self.runner, "0.0.0.0", int(self.port), ssl_context=self.context)
await site.start()
async def stop_server(self):
self.logger.info("Shutting down webserver")
#
# We should do this but it makes AD hang so ...
#
# await self.runner.cleanup()
async def add_response_headers(self, request, response):
for header, value in self.http["headers"].items():
response.headers[header] = value
def stop(self):
self.stopping = True
def _process_arg(self, arg, kwargs):
if kwargs:
if arg in kwargs:
setattr(self, arg, kwargs[arg])
@staticmethod
def check_password(password, hash):
return bcrypt.checkpw, str.encode(password), str.encode(hash)
async def forcelogon(self, request):
response = await self.logon_page(request)
return response
async def logon_response(self, request):
try:
data = await request.post()
password = data["password"]
if password == self.password:
self.access.info("Successful logon from %s", request.host)
hashed = bcrypt.hashpw(str.encode(self.password), bcrypt.gensalt(self.work_factor))
if self.admin is not None:
response = await self._admin_page(request)
else:
response = await self._list_dash(request)
self.logger.debug("hashed=%s", hashed)
# Set cookie to last for 1 year
response.set_cookie("adcreds", hashed.decode("utf-8"), max_age=31536000)
else:
self.access.warning("Unsuccessful logon from %s", request.host)
response = await self.logon_page(request)
return response
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in logon_response()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Server error in logon_response()")
# noinspection PyUnusedLocal
@secure
async def list_dash(self, request):
return await self._list_dash(request)
async def _list_dash(self, request):
response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard_list)
return web.Response(text=response, content_type="text/html")
@secure
async def load_dash(self, request):
name = request.match_info.get("name", "Anonymous")
params = request.query
skin = params.get("skin", "default")
recompile = params.get("recompile", False)
if recompile == "1":
recompile = True
response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard, name, skin, recompile)
return web.Response(text=response, content_type="text/html")
async def update_rss(self):
# Grab RSS Feeds
if self.rss_feeds is not None and self.rss_update is not None:
while not self.stopping:
try:
if self.rss_last_update is None or (self.rss_last_update + self.rss_update) <= time.time():
self.rss_last_update = time.time()
for feed_data in self.rss_feeds:
feed = await utils.run_in_executor(self, feedparser.parse, feed_data["feed"])
if "bozo_exception" in feed:
self.logger.warning(
"Error in RSS feed %s: %s", feed_data["feed"], feed["bozo_exception"],
)
else:
new_state = {"feed": feed}
# RSS Feeds always live in the admin namespace
await self.AD.state.set_state("rss", "admin", feed_data["target"], state=new_state)
await asyncio.sleep(1)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in update_rss()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
#
# REST API
#
@securedata
async def get_ad(self, request):
return web.json_response({"state": {"status": "active"}}, dumps=utils.convert_json)
@securedata
async def get_entity(self, request):
namespace = None
entity_id = None
try:
entity_id = request.match_info.get("entity")
namespace = request.match_info.get("namespace")
self.logger.debug("get_state() called, ns=%s, entity=%s", namespace, entity_id)
state = self.AD.state.get_entity(namespace, entity_id)
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_entity()")
self.logger.warning("Namespace: %s, entity: %s", namespace, entity_id)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_entity()")
@securedata
async def get_namespace(self, request):
namespace = None
try:
namespace = request.match_info.get("namespace")
self.logger.debug("get_namespace() called, ns=%s", namespace)
state = self.AD.state.get_entity(namespace)
self.logger.debug("result = %s", state)
if state is None:
return self.get_response(request, 404, "Namespace Not Found")
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_namespace()")
self.logger.warning("Namespace: %s", namespace)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_namespace()")
@securedata
async def get_namespace_entities(self, request):
namespace = None
try:
namespace = request.match_info.get("namespace")
self.logger.debug("get_namespace_entities() called, ns=%s", namespace)
state = self.AD.state.list_namespace_entities(namespace)
self.logger.debug("result = %s", state)
if state is None:
return self.get_response(request, 404, "Namespace Not Found")
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_namespace_entities()")
self.logger.warning("Namespace: %s", namespace)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_namespace_entities()")
@securedata
async def get_namespaces(self, request):
try:
self.logger.debug("get_namespaces() called)")
state = await self.AD.state.list_namespaces()
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_namespaces()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_namespaces()")
@securedata
async def get_services(self, request):
try:
self.logger.debug("get_services() called)")
state = self.AD.services.list_services()
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_services()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_services()")
@securedata
async def get_state(self, request):
try:
self.logger.debug("get_state() called")
state = self.AD.state.get_entity()
if state is None:
self.get_response(request, 404, "State Not Found")
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_state()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_state()")
@securedata
async def get_logs(self, request):
try:
self.logger.debug("get_logs() called")
logs = await utils.run_in_executor(self, self.AD.logging.get_admin_logs)
return web.json_response({"logs": logs}, dumps=utils.convert_json)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in get_logs()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.get_response(request, 500, "Unexpected error in get_logs()")
# noinspection PyUnusedLocal
@securedata
async def call_service(self, request):
try:
try:
data = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
args = {}
namespace = request.match_info.get("namespace")
domain = request.match_info.get("domain")
service = request.match_info.get("service")
#
# Some value munging for dashboard
#
for key in data:
if key == "service":
pass
elif key == "rgb_color":
m = re.search(r"\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)", data[key])
if m:
r = m.group(1)
g = m.group(2)
b = m.group(3)
args["rgb_color"] = [r, g, b]
elif key == "xy_color":
m = re.search(r"\s*(\d+\.\d+)\s*,\s*(\d+\.\d+)", data[key])
if m:
x = m.group(1)
y = m.group(2)
args["xy_color"] = [x, y]
elif key == "json_args":
json_args = json.loads(data[key])
for k in json_args.keys():
args[k] = json_args[k]
else:
args[key] = data[key]
self.logger.debug("call_service() args = %s", args)
await self.AD.services.call_service(namespace, domain, service, args)
return web.Response(status=200)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in call_service()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return web.Response(status=500)
@securedata
async def fire_event(self, request):
try:
try:
data = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
args = {}
namespace = request.match_info.get("namespace")
event = request.match_info.get("event")
#
# Some value munging for dashboard
#
for key in data:
if key == "event":
pass
else:
args[key] = data[key]
self.logger.debug("fire_event() args = %s", args)
await self.AD.events.fire_event(namespace, event, **args)
return web.Response(status=200)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in fire_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return web.Response(status=500)
# noinspection PyUnusedLocal
async def not_found(self, request):
return self.get_response(request, 404, "Not Found")
# Stream Handling
async def stream_update(self, namespace, data):
# self.logger.debug("stream_update() %s:%s", namespace, data)
data["namespace"] = namespace
self.AD.thread_async.call_async_no_wait(self.stream.process_event, data)
# Routes, Status and Templates
def setup_api_routes(self):
self.app.router.add_post("/api/appdaemon/service/{namespace}/{domain}/{service}", self.call_service)
self.app.router.add_post("/api/appdaemon/event/{namespace}/{event}", self.fire_event)
self.app.router.add_get("/api/appdaemon/service/", self.get_services)
self.app.router.add_get("/api/appdaemon/state/{namespace}/{entity}", self.get_entity)
self.app.router.add_get("/api/appdaemon/state/{namespace}", self.get_namespace)
self.app.router.add_get("/api/appdaemon/state/{namespace}/", self.get_namespace_entities)
self.app.router.add_get("/api/appdaemon/state/", self.get_namespaces)
self.app.router.add_get("/api/appdaemon/state", self.get_state)
self.app.router.add_get("/api/appdaemon/logs", self.get_logs)
self.app.router.add_post("/api/appdaemon/{app}", self.call_api)
self.app.router.add_get("/api/appdaemon", self.get_ad)
def setup_http_routes(self):
self.app.router.add_get("/favicon.ico", self.not_found)
self.app.router.add_get("/{gfx}.png", self.not_found)
self.app.router.add_post("/logon_response", self.logon_response)
# Add static path for JavaScript
self.app.router.add_static("/javascript", self.javascript_dir)
# Add static path for fonts
self.app.router.add_static("/fonts", self.fonts_dir)
# Add static path for webfonts
self.app.router.add_static("/webfonts", self.webfonts_dir)
# Add static path for images
self.app.router.add_static("/images", self.images_dir)
# Add static path for css
self.app.router.add_static("/css", self.css_dir)
if self.admin is not None:
self.app.router.add_get("/", self.admin_page)
elif self.dashboard is not None:
self.app.router.add_get("/", self.list_dash)
else:
self.app.router.add_get("/", self.error_page)
#
# For App based Web Server
#
self.app.router.add_get("/app/{route}", self.app_webserver)
#
# Add static path for apps
#
apps_static = os.path.join(self.AD.config_dir, "www")
exists = True
if not os.path.isdir(apps_static): # check if the folder exists
try:
os.mkdir(apps_static)
except OSError:
self.logger.warning("Creation of the Web directory %s failed", apps_static)
exists = False
else:
self.logger.debug("Successfully created the Web directory %s ", apps_static)
if exists:
self.app.router.add_static("/local", apps_static)
#
# Setup user defined static paths
#
for name, static_dir in self.static_dirs.items():
if not os.path.isdir(static_dir): # check if the folder exists
self.logger.warning("The Web directory %s doesn't exist. So static route not set up", static_dir)
else:
self.app.router.add_static("/{}".format(name), static_dir)
self.logger.debug("Successfully created the Web directory %s ", static_dir)
def setup_dashboard_routes(self):
self.app.router.add_get("/list", self.list_dash)
self.app.router.add_get("/{name}", self.load_dash)
# Setup Templates
self.app.router.add_static("/compiled_javascript", self.dashboard_obj.compiled_javascript_dir)
self.app.router.add_static("/compiled_css", self.dashboard_obj.compiled_css_dir)
# Add path for custom_css if it exists
custom_css = os.path.join(self.dashboard_obj.config_dir, "custom_css")
if os.path.isdir(custom_css):
self.app.router.add_static("/custom_css", custom_css)
# API
async def terminate_app(self, name):
if name in self.endpoints:
del self.endpoints[name]
if name in self.app_routes:
del self.app_routes[name]
def get_response(self, request, code, error):
res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in API Call</body></html>".format(
code, error, code, error
)
app = request.match_info.get("app", "system")
if code == 200:
self.access.info("API Call to %s: status: %s", app, code)
else:
self.access.warning("API Call to %s: status: %s, %s", app, code, error)
return web.Response(body=res, status=code)
def get_web_response(self, request, code, error):
res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in Web Service Call</body></html>".format(
code, error, code, error
)
app = request.match_info.get("app", "system")
if code == 200:
self.access.info("Web Call to %s: status: %s", app, code)
else:
self.access.warning("Web Call to %s: status: %s, %s", app, code, error)
return web.Response(text=res, content_type="text/html")
@securedata
async def call_api(self, request):
code = 200
ret = ""
app = request.match_info.get("app")
try:
args = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
try:
ret, code = await self.dispatch_app_by_name(app, args)
except Exception:
self.logger.error("-" * 60)
self.logger.error("Unexpected error during API call")
self.logger.error("-" * 60)
self.logger.error(traceback.format_exc())
self.logger.error("-" * 60)
if code == 404:
return self.get_response(request, 404, "App Not Found")
response = "OK"
self.access.info("API Call to %s: status: %s %s", app, code, response)
return web.json_response(ret, status=code, dumps=utils.convert_json)
# Routes, Status and Templates
async def register_endpoint(self, cb, name):
handle = uuid.uuid4().hex
if name not in self.endpoints:
self.endpoints[name] = {}
self.endpoints[name][handle] = {"callback": cb, "name": name}
return handle
async def unregister_endpoint(self, handle, name):
if name in self.endpoints and handle in self.endpoints[name]:
del self.endpoints[name][handle]
async def dispatch_app_by_name(self, name, args):
callback = None
for app in self.endpoints:
for handle in self.endpoints[app]:
if self.endpoints[app][handle]["name"] == name:
callback = self.endpoints[app][handle]["callback"]
if callback is not None:
if asyncio.iscoroutinefunction(callback):
return await callback(args)
else:
return await utils.run_in_executor(self, callback, args)
else:
return "", 404
#
# App based Web Server
#
async def register_route(self, cb, route, name, **kwargs):
if not asyncio.iscoroutinefunction(cb): # must be async function
self.logger.warning(
"Could not Register Callback for %s, using Route %s as Web Server Route. Callback must be Async",
name,
route,
)
return
handle = uuid.uuid4().hex
if name not in self.app_routes:
self.app_routes[name] = {}
token = kwargs.get("token")
self.app_routes[name][handle] = {"callback": cb, "route": route, "token": token}
return handle
async def unregister_route(self, handle, name):
if name in self.app_routes and handle in self.app_routes[name]:
del self.app_routes[name][handle]
@route_secure
async def app_webserver(self, request):
name = None
route = request.match_info.get("route")
token = request.query.get("token")
code = 404
error = "Requested Server does not exist"
callback = None
for name in self.app_routes:
if callback is not None: # a callback has been collected
break
for handle in self.app_routes[name]:
app_route = self.app_routes[name][handle]["route"]
app_token = self.app_routes[name][handle]["token"]
if app_route == route:
if app_token is not None and app_token != token:
return self.get_web_response(request, "401", "Unauthorized")
callback = self.app_routes[name][handle]["callback"]
break
if callback is not None:
self.access.debug("Web Call to %s for %s", route, name)
try:
f = asyncio.ensure_future(callback(request))
self.AD.futures.add_future(name, f)
return await f
except asyncio.CancelledError:
code = 503
error = "Request was Cancelled"
except Exception:
self.logger.error("-" * 60)
self.logger.error("Unexpected error during Web call")
self.logger.error("-" * 60)
self.logger.error(traceback.format_exc())
self.logger.error("-" * 60)
code = 503
error = "Request had an Error"
return self.get_web_response(request, str(code), error)
#
# Admin
#
@secure
async def admin_page(self, request):
return await self._admin_page(request)
# Insecure version
async def _admin_page(self, request):
response = await self.admin_obj.admin_page(request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
async def logon_page(self, request):
response = await utils.run_in_executor(self, self.generate_logon_page, request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
async def error_page(self, request):
response = await utils.run_in_executor(self, self.generate_error_page, request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
def generate_logon_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]),
)
template = env.get_template("logon.jinja2")
rendered_template = template.render(params)
return rendered_template
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
def generate_error_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]),
)
template = env.get_template("error.jinja2")
rendered_template = template.render(params)
return rendered_template
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
<|code_end|>
|
Rework AD's internal Service calls
Service calls within AD should no longer wait to return results by default
If results are needed, the user will have 2 options
1. Give the `call_service` api a callback to use
2. Set a flag, that it should wait for the result
This will help significantly speedup the core, as right now only one service call can be ran at the same time
| appdaemon/adapi.py
<|code_start|>import asyncio
import datetime
import inspect
import iso8601
import re
from datetime import timedelta
from copy import deepcopy
from typing import Any, Optional, Callable
# needed for fake coro cb that looks like scheduler
import uuid
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class ADAPI:
"""AppDaemon API class.
This class includes all native API calls to AppDaemon
"""
#
# Internal parameters
#
def __init__(self, ad: AppDaemon, name, logging_obj, args, config, app_config, global_vars):
# Store args
self.AD = ad
self.name = name
self._logging = logging_obj
self.config = config
self.app_config = app_config
self.args = deepcopy(args)
self.app_dir = self.AD.app_dir
self.config_dir = self.AD.config_dir
self.dashboard_dir = None
if self.AD.http is not None:
self.dashboard_dir = self.AD.http.dashboard_dir
self.global_vars = global_vars
self._namespace = "default"
self.logger = self._logging.get_child(name)
self.err = self._logging.get_error().getChild(name)
self.user_logs = {}
if "log_level" in args:
self.logger.setLevel(args["log_level"])
self.err.setLevel(args["log_level"])
if "log" in args:
userlog = self.get_user_log(args["log"])
if userlog is not None:
self.logger = userlog
self.dialogflow_v = 2
@staticmethod
def _sub_stack(msg):
# If msg is a data structure of some type, don't sub
if type(msg) is str:
stack = inspect.stack()
if msg.find("__module__") != -1:
msg = msg.replace("__module__", stack[2][1])
if msg.find("__line__") != -1:
msg = msg.replace("__line__", str(stack[2][2]))
if msg.find("__function__") != -1:
msg = msg.replace("__function__", stack[2][3])
return msg
def _get_namespace(self, **kwargs):
if "namespace" in kwargs:
namespace = kwargs["namespace"]
del kwargs["namespace"]
else:
namespace = self._namespace
return namespace
#
# Logging
#
def _log(self, logger, msg, *args, **kwargs):
#
# Internal
#
if "level" in kwargs:
level = kwargs.pop("level", "INFO")
else:
level = "INFO"
ascii_encode = kwargs.pop("ascii_encode", True)
if ascii_encode is True:
msg = str(msg).encode("utf-8", "replace").decode("ascii", "replace")
logger.log(self._logging.log_levels[level], msg, *args, **kwargs)
def log(self, msg, *args, **kwargs):
"""Logs a message to AppDaemon's main logfile.
Args:
msg (str): The message to log.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
level (str, optional): The log level of the message - takes a string representing the
standard logger levels (Default: ``"WARNING"``).
ascii_encode (bool, optional): Switch to disable the encoding of all log messages to
ascii. Set this to true if you want to log UTF-8 characters (Default: ``True``).
log (str, optional): Send the message to a specific log, either system or user_defined.
System logs are ``main_log``, ``error_log``, ``diag_log`` or ``access_log``.
Any other value in use here must have a corresponding user-defined entity in
the ``logs`` section of appdaemon.yaml.
stack_info (bool, optional): If ``True`` the stack info will included.
Returns:
None.
Examples:
Log a message to the main logfile of the system.
>>> self.log("Log Test: Parameter is %s", some_variable)
Log a message to the specified logfile.
>>> self.log("Log Test: Parameter is %s", some_variable, log="test_log")
Log a message with error-level to the main logfile of the system.
>>> self.log("Log Test: Parameter is %s", some_variable, level = "ERROR")
Log a message using `placeholders` to the main logfile of the system.
>>> self.log("Line: __line__, module: __module__, function: __function__, Msg: Something bad happened")
Log a WARNING message (including the stack info) to the main logfile of the system.
>>> self.log("Stack is", some_value, level="WARNING", stack_info=True)
"""
if "log" in kwargs:
# Its a user defined log
logger = self.get_user_log(kwargs["log"])
kwargs.pop("log")
else:
logger = self.logger
try:
msg = self._sub_stack(msg)
except IndexError as i:
rargs = deepcopy(kwargs)
rargs["level"] = "ERROR"
self._log(self.err, i, *args, **rargs)
self._log(logger, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""Logs a message to AppDaemon's error logfile.
Args:
msg (str): The message to log.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
level (str, optional): The log level of the message - takes a string representing the
standard logger levels.
ascii_encode (bool, optional): Switch to disable the encoding of all log messages to
ascii. Set this to true if you want to log UTF-8 characters (Default: ``True``).
log (str, optional): Send the message to a specific log, either system or user_defined.
System logs are ``main_log``, ``error_log``, ``diag_log`` or ``access_log``.
Any other value in use here must have a corresponding user-defined entity in
the ``logs`` section of appdaemon.yaml.
Returns:
None.
Examples:
Log an error message to the error logfile of the system.
>>> self.error("Some Warning string")
Log an error message with critical-level to the error logfile of the system.
>>> self.error("Some Critical string", level = "CRITICAL")
"""
self._log(self.err, msg, *args, **kwargs)
@utils.sync_wrapper
async def listen_log(self, callback, level="INFO", **kwargs):
"""Registers the App to receive a callback every time an App logs a message.
Args:
callback (function): Function to be called when a message is logged.
level (str): Logging level to be used - lower levels will not be forwarded
to the app (Default: ``"INFO"``).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
log (str, optional): Name of the log to listen to, default is all logs. The name
should be one of the 4 built in types ``main_log``, ``error_log``, ``diag_log``
or ``access_log`` or a user defined log entry.
pin (bool, optional): If True, the callback will be pinned to a particular thread.
pin_thread (int, optional): Specify which thread from the worker pool the callback
will be run by (0 - number of threads -1).
Returns:
A unique identifier that can be used to cancel the callback if required.
Since variables created within object methods are local to the function they are
created in, and in all likelihood, the cancellation will be invoked later in a
different function, it is recommended that handles are stored in the object
namespace, e.g., self.handle.
Examples:
Listen to all ``WARNING`` log messages of the system.
>>> self.handle = self.listen_log(self.cb, "WARNING")
Sample callback:
>>> def log_message(self, name, ts, level, type, message, kwargs):
Listen to all ``WARNING`` log messages of the `main_log`.
>>> self.handle = self.listen_log(self.cb, "WARNING", log="main_log")
Listen to all ``WARNING`` log messages of a user-defined logfile.
>>> self.handle = self.listen_log(self.cb, "WARNING", log="my_custom_log")
"""
namespace = kwargs.pop("namespace", "admin")
return await self.AD.logging.add_log_callback(namespace, self.name, callback, level, **kwargs)
@utils.sync_wrapper
async def cancel_listen_log(self, handle):
"""Cancels the log callback for the App.
Args:
handle: The handle returned when the `listen_log` call was made.
Returns:
Boolean.
Examples:
>>> self.cancel_listen_log(handle)
"""
self.logger.debug("Canceling listen_log for %s", self.name)
return await self.AD.logging.cancel_log_callback(self.name, handle)
def get_main_log(self):
"""Returns the underlying logger object used for the main log.
Examples:
Log a critical message to the `main` logfile of the system.
>>> log = self.get_main_log()
>>> log.critical("Log a critical error")
"""
return self.logger
def get_error_log(self):
"""Returns the underlying logger object used for the error log.
Examples:
Log an error message to the `error` logfile of the system.
>>> error_log = self.get_error_log()
>>> error_log.error("Log an error", stack_info=True, exc_info=True)
"""
return self.err
def get_user_log(self, log):
"""Gets the specified-user logger of the App.
Args:
log (str): The name of the log you want to get the underlying logger object from,
as described in the ``logs`` section of ``appdaemon.yaml``.
Returns:
The underlying logger object used for the error log.
Examples:
Log an error message to a user-defined logfile.
>>> log = self.get_user_log("test_log")
>>> log.error("Log an error", stack_info=True, exc_info=True)
"""
logger = None
if log in self.user_logs:
# Did we use it already?
logger = self.user_logs[log]
else:
# Build it on the fly
parent = self.AD.logging.get_user_log(self, log)
if parent is not None:
logger = parent.getChild(self.name)
self.user_logs[log] = logger
if "log_level" in self.args:
logger.setLevel(self.args["log_level"])
return logger
def set_log_level(self, level):
"""Sets a specific log level for the App.
Args:
level (str): Log level.
Returns:
None.
Notes:
Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
``DEBUG``, ``NOTSET``.
Examples:
>>> self.set_log_level("DEBUG")
"""
self.logger.setLevel(self._logging.log_levels[level])
self.err.setLevel(self._logging.log_levels[level])
for log in self.user_logs:
self.user_logs[log].setLevel(self._logging.log_levels[level])
def set_error_level(self, level):
"""Sets the log level to send to the `error` logfile of the system.
Args:
level (str): Error level.
Returns:
None.
Notes:
Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
``DEBUG``, ``NOTSET``.
"""
self.err.setLevel(self._logging.log_levels[level])
#
# Threading
#
@utils.sync_wrapper
async def set_app_pin(self, pin):
"""Sets an App to be pinned or unpinned.
Args:
pin (bool): Sets whether the App becomes pinned or not.
Returns:
None.
Examples:
The following line should be put inside the `initialize()` function.
>>> self.set_app_pin(True)
"""
await self.AD.threading.set_app_pin(self.name, pin)
@utils.sync_wrapper
async def get_app_pin(self):
"""Finds out if the current App is currently pinned or not.
Returns:
bool: ``True`` if the App is pinned, ``False`` otherwise.
Examples:
>>> if self.get_app_pin(True):
>>> self.log("App pinned!")
"""
return await self.AD.threading.get_app_pin(self.name)
@utils.sync_wrapper
async def set_pin_thread(self, thread):
"""Sets the thread that the App will be pinned to.
Args:
thread (int): Number of the thread to pin to. Threads start at 0 and go up to the number
of threads specified in ``appdaemon.yaml`` -1.
Returns:
None.
Examples:
The following line should be put inside the `initialize()` function.
>>> self.set_pin_thread(5)
"""
return await self.AD.threading.set_pin_thread(self.name, thread)
@utils.sync_wrapper
async def get_pin_thread(self):
"""Finds out which thread the App is pinned to.
Returns:
int: The thread number or -1 if the App is not pinned.
Examples:
>>> thread = self.get_pin_thread():
>>> self.log(f"I'm pinned to thread: {thread}")
"""
return await self.AD.threading.get_pin_thread(self.name)
#
# Namespace
#
def set_namespace(self, namespace):
"""Sets a new namespace for the App to use from that point forward.
Args:
namespace (str): Name of the new namespace
Returns:
None.
Examples:
>>> self.set_namespace("hass1")
"""
self._namespace = namespace
def get_namespace(self):
"""Returns the App's namespace."""
return self._namespace
@utils.sync_wrapper
async def namespace_exists(self, namespace):
"""Checks the existence of a namespace in AppDaemon.
Args:
namespace (str): The namespace to be checked if it exists.
Returns:
bool: ``True`` if the namespace exists, ``False`` otherwise.
Examples:
Check if the namespace ``storage`` exists within AD
>>> if self.namespace_exists("storage"):
>>> #do something like create it
"""
return await self.AD.state.namespace_exists(namespace)
@utils.sync_wrapper
async def add_namespace(self, namespace, **kwargs):
"""Used to add a user-defined namespaces from apps, which has a database file associated with it.
This way, when AD restarts these entities will be reloaded into AD with its
previous states within the namespace. This can be used as a basic form of
non-volatile storage of entity data. Depending on the configuration of the
namespace, this function can be setup to constantly be running automatically
or only when AD shutdown. This function also allows for users to manually
execute the command as when needed.
Args:
namespace (str): The namespace to be newly created, which must not be same as the operating namespace
writeback (optional): The writeback to be used.
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
writeback (str, optional): The writeback to be used. WIll be safe by default
persist (bool, optional): If to make the namespace persistent. So if AD reboots
it will startup will all the created entities being intact. It is persistent by default
Returns:
The file path to the newly created namespace. WIll be None if not persistent
Examples:
Add a new namespace called `storage`.
>>> self.add_namespace("storage")
"""
if namespace == self.get_namespace(): # if it belongs to this app's namespace
raise ValueError("Cannot add namespace with the same name as operating namespace")
writeback = kwargs.get("writeback", "safe")
persist = kwargs.get("persist", True)
return await self.AD.state.add_namespace(namespace, writeback, persist, self.name)
@utils.sync_wrapper
async def remove_namespace(self, namespace):
"""Used to remove a previously user-defined namespaces from apps, which has a database file associated with it.
Args:
namespace (str): The namespace to be removed, which must not be same as the operating namespace
Returns:
The data within that namespace
Examples:
Removes the namespace called `storage`.
>>> self.remove_namespace("storage")
"""
if namespace == self.get_namespace(): # if it belongs to this app's namespace
raise ValueError("Cannot remove namespace with the same name as operating namespace")
return await self.AD.state.remove_namespace(namespace)
@utils.sync_wrapper
async def list_namespaces(self):
"""Returns a list of available namespaces.
Examples:
>>> self.list_namespaces()
"""
return await self.AD.state.list_namespaces()
@utils.sync_wrapper
async def save_namespace(self, **kwargs):
"""Saves entities created in user-defined namespaces into a file.
This way, when AD restarts these entities will be reloaded into AD with its
previous states within the namespace. This can be used as a basic form of
non-volatile storage of entity data. Depending on the configuration of the
namespace, this function can be setup to constantly be running automatically
or only when AD shutdown. This function also allows for users to manually
execute the command as when needed.
Args:
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
Save all entities of the default namespace.
>>> self.save_namespace()
"""
namespace = self._get_namespace(**kwargs)
await self.AD.state.save_namespace(namespace)
#
# Utility
#
@utils.sync_wrapper
async def get_app(self, name):
"""Gets the instantiated object of another app running within the system.
This is useful for calling functions or accessing variables that reside
in different apps without requiring duplication of code.
Args:
name (str): Name of the app required. This is the name specified in
header section of the config file, not the module or class.
Returns:
An object reference to the class.
Examples:
>>> MyApp = self.get_app("MotionLights")
>>> MyApp.turn_light_on()
"""
return await self.AD.app_management.get_app(name)
@utils.sync_wrapper
async def _check_entity(self, namespace, entity):
if "." not in entity:
raise ValueError("{}: Invalid entity ID: {}".format(self.name, entity))
if not await self.AD.state.entity_exists(namespace, entity):
self.logger.warning("%s: Entity %s not found in namespace %s", self.name, entity, namespace)
@staticmethod
def get_ad_version():
"""Returns a string with the current version of AppDaemon.
Examples:
>>> version = self.get_ad_version()
"""
return utils.__version__
#
# Entity
#
@utils.sync_wrapper
async def add_entity(self, entity_id, state=None, attributes=None, **kwargs):
"""Adds a non-existent entity, by creating it within a namespaces.
If an entity doesn't exists and needs to be created, this function can be used to create it locally.
Please note this only creates the entity locally.
Args:
entity_id (str): The fully qualified entity id (including the device type).
state (str): The state the entity is to have
attributes (dict): The attributes the entity is to have
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
Add the entity in the present namespace.
>>> self.add_entity('sensor.living_room')
adds the entity in the `mqtt` namespace.
>>> self.add_entity('mqtt.living_room_temperature', namespace='mqtt')
"""
namespace = self._get_namespace(**kwargs)
if await self.AD.state.entity_exists(namespace, entity_id):
self.logger.warning("%s already exists, will not be adding it", entity_id)
return None
await self.AD.state.add_entity(namespace, entity_id, state, attributes)
return None
@utils.sync_wrapper
async def entity_exists(self, entity_id, **kwargs):
"""Checks the existence of an entity in Home Assistant.
When working with multiple Home Assistant instances, it is possible to specify the
namespace, so that it checks within the right instance in in the event the app is
working in a different instance. Also when using this function, it is also possible
to check if an AppDaemon entity exists.
Args:
entity_id (str): The fully qualified entity id (including the device type).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
bool: ``True`` if the entity id exists, ``False`` otherwise.
Examples:
Check if the entity light.living_room exist within the app's namespace
>>> if self.entity_exists("light.living_room"):
>>> #do something
Check if the entity mqtt.security_settings exist within the `mqtt` namespace
if the app is operating in a different namespace like default
>>> if self.entity_exists("mqtt.security_settings", namespace = "mqtt"):
>>> #do something
"""
namespace = self._get_namespace(**kwargs)
return await self.AD.state.entity_exists(namespace, entity_id)
@utils.sync_wrapper
async def split_entity(self, entity_id, **kwargs):
"""Splits an entity into parts.
This utility function will take a fully qualified entity id of the form ``light.hall_light``
and split it into 2 values, the device and the entity, e.g. light and hall_light.
Args:
entity_id (str): The fully qualified entity id (including the device type).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
A list with 2 entries, the device and entity respectively.
Examples:
Do some action if the device of the entity is `scene`.
>>> device, entity = self.split_entity(entity_id)
>>> if device == "scene":
>>> #do something specific to scenes
"""
await self._check_entity(self._get_namespace(**kwargs), entity_id)
return entity_id.split(".")
@utils.sync_wrapper
async def remove_entity(self, entity_id, **kwargs):
"""Deletes an entity created within a namespaces.
If an entity was created, and its deemed no longer needed, by using this function,
the entity can be removed from AppDaemon permanently.
Args:
entity_id (str): The fully qualified entity id (including the device type).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
None.
Examples:
Delete the entity in the present namespace.
>>> self.remove_entity('sensor.living_room')
Delete the entity in the `mqtt` namespace.
>>> self.remove_entity('mqtt.living_room_temperature', namespace = 'mqtt')
"""
namespace = self._get_namespace(**kwargs)
await self.AD.state.remove_entity(namespace, entity_id)
return None
@staticmethod
def split_device_list(devices):
"""Converts a comma-separated list of device types to an iterable list.
This is intended to assist in use cases where the App takes a list of
entities from an argument, e.g., a list of sensors to monitor. If only
one entry is provided, an iterable list will still be returned to avoid
the need for special processing.
Args:
devices (str): A comma-separated list of devices to be split (without spaces).
Returns:
A list of split devices with 1 or more entries.
Examples:
>>> for sensor in self.split_device_list(self.args["sensors"]):
>>> #do something for each sensor, e.g., make a state subscription
"""
return devices.split(",")
@utils.sync_wrapper
async def get_plugin_config(self, **kwargs):
"""Gets any useful metadata that the plugin may have available.
For instance, for the HASS plugin, this will return Home Assistant configuration
data such as latitude and longitude.
Args:
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str): Select the namespace of the plugin for which data is desired.
Returns:
A dictionary containing all the configuration information available
from the Home Assistant ``/api/config`` endpoint.
Examples:
>>> config = self.get_plugin_config()
>>> self.log(f'My current position is {config["latitude"]}(Lat), {config["longitude"]}(Long)')
My current position is 50.8333(Lat), 4.3333(Long)
"""
namespace = self._get_namespace(**kwargs)
return await self.AD.plugins.get_plugin_meta(namespace)
@utils.sync_wrapper
async def friendly_name(self, entity_id, **kwargs):
"""Gets the Friendly Name of an entity.
Args:
entity_id (str): The fully qualified entity id (including the device type).
**kwargs (optional): Zero or more keyword arguments.
Keyword Args:
namespace (str, optional): Namespace to use for the call. See the section on
`namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
In most cases it is safe to ignore this parameter.
Returns:
str: The friendly name of the entity if it exists or the entity id if not.
Examples:
>>> tracker = "device_tracker.andrew"
>>> friendly_name = self.friendly_name(tracker)
>>> tracker_state = self.get_tracker_state(tracker)
>>> self.log(f"{tracker} ({friendly_name}) is {tracker_state}.")
device_tracker.andrew (Andrew Tracker) is on.
"""
await self._check_entity(self._get_namespace(**kwargs), entity_id)
state = await self.get_state(**kwargs)
if entity_id in state:
if "friendly_name" in state[entity_id]["attributes"]:
return state[entity_id]["attributes"]["friendly_name"]
else:
return entity_id
return None
@utils.sync_wrapper
async def set_production_mode(self, mode=True):
"""Deactivates or activates the production mode in AppDaemon.
When called without declaring passing any arguments, mode defaults to ``True``.
Args:
mode (bool): If it is ``True`` the production mode is activated, or deactivated
otherwise.
Returns:
The specified mode or ``None`` if a wrong parameter is passed.
"""
if not isinstance(mode, bool):
self.logger.warning("%s not a valid parameter for Production Mode", mode)
return None
await self.AD.utility.set_production_mode(mode)
return mode
#
# Internal Helper functions
#
def start_app(self, app, **kwargs):
"""Starts an App which can either be running or not.
This Api call cannot start an app which has already been disabled in the App Config.
It essentially only runs the initialize() function in the app, and changes to attributes
like class name or app config is not taken into account.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.start_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "admin"
kwargs["__name"] = self.name
self.call_service("app/start", **kwargs)
return None
def stop_app(self, app, **kwargs):
"""Stops an App which is running.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.stop_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "admin"
kwargs["__name"] = self.name
self.call_service("app/stop", **kwargs)
return None
def restart_app(self, app, **kwargs):
"""Restarts an App which can either be running or not.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.restart_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "admin"
kwargs["__name"] = self.name
self.call_service("app/restart", **kwargs)
return None
def reload_apps(self, **kwargs):
"""Reloads the apps, and loads up those that have changes made to their .yaml or .py files.
This utility function can be used if AppDaemon is running in production mode, and it is
needed to reload apps that changes have been made to.
Args:
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.reload_apps()
"""
kwargs["namespace"] = "admin"
kwargs["__name"] = self.name
self.call_service("app/reload", **kwargs)
return None
#
# Dialogflow
#
def get_dialogflow_intent(self, data):
"""Gets the intent's action from the Google Home response.
Args:
data: Response received from Google Home.
Returns:
A string representing the Intent from the interaction model that was requested,
or ``None``, if no action was received.
Examples:
>>> intent = ADAPI.get_dialogflow_intent(data)
"""
if "result" in data and "action" in data["result"]:
self.dialogflow_v = 1
return data["result"]["action"]
elif "queryResult" in data and "action" in data["queryResult"]:
self.dialogflow_v = 2
return data["queryResult"]["action"]
else:
return None
@staticmethod
def get_dialogflow_slot_value(data, slot=None):
"""Gets slots' values from the interaction model.
Args:
data: Response received from Google Home.
slot (str): Name of the slot. If a name is not specified, all slots will be returned
as a dictionary. If a name is specified but is not found, ``None`` will be returned.
Returns:
A string representing the value of the slot from the interaction model, or a hash of slots.
Examples:
>>> beer_type = ADAPI.get_dialogflow_intent(data, "beer_type")
>>> all_slots = ADAPI.get_dialogflow_intent(data)
"""
if "result" in data:
# using V1 API
contexts = data["result"]["contexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["result"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
elif "queryResult" in data:
# using V2 API
contexts = data["queryResult"]["outputContexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["queryResult"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
else:
return None
def format_dialogflow_response(self, speech=None):
"""Formats a response to be returned to Google Home, including speech.
Args:
speech (str): The text for Google Home to say.
Returns:
None.
Examples:
>>> ADAPI.format_dialogflow_response(speech = "Hello World")
"""
if self.dialogflow_v == 1:
speech = {"speech": speech, "source": "Appdaemon", "displayText": speech}
elif self.dialogflow_v == 2:
speech = {"fulfillmentText": speech, "source": "Appdaemon"}
else:
speech = None
return speech
#
# Alexa
#
@staticmethod
def format_alexa_response(speech=None, card=None, title=None):
"""Formats a response to be returned to Alex including speech and a card.
Args:
speech (str): The text for Alexa to say.
card (str): Text for the card.
title (str): Title for the card.
Returns:
None.
Examples:
>>> ADAPI.format_alexa_response(speech = "Hello World", card = "Greetings to the world", title = "Hello")
"""
response = {"shouldEndSession": True}
if speech is not None:
response["outputSpeech"] = {"type": "PlainText", "text": speech}
if card is not None:
response["card"] = {"type": "Simple", "title": title, "content": card}
speech = {"version": "1.0", "response": response, "sessionAttributes": {}}
return speech
@staticmethod
def get_alexa_error(data):
"""Gets the error message from the Alexa API response.
Args:
data: Response received from the Alexa API .
Returns:
A string representing the value of message, or ``None`` if no error message was received.
"""
if "request" in data and "err" in data["request"] and "message" in data["request"]["err"]:
return data["request"]["err"]["message"]
else:
return None
@staticmethod
def get_alexa_intent(data):
"""Gets the Intent's name from the Alexa response.
Args:
data: Response received from Alexa.
Returns:
A string representing the Intent's name from the interaction model that was requested,
or ``None``, if no Intent was received.
Examples:
>>> intent = ADAPI.get_alexa_intent(data)
"""
if "request" in data and "intent" in data["request"] and "name" in data["request"]["intent"]:
return data["request"]["intent"]["name"]
else:
return None
@staticmethod
def get_alexa_slot_value(data, slot=None):
"""Gets values for slots from the interaction model.
Args:
data: The request data received from Alexa.
slot: Name of the slot. If a name is not specified, all slots will be returned as
a dictionary. If a name is specified but is not found, None will be returned.
Returns:
A ``string`` representing the value of the slot from the interaction model, or a ``hash`` of slots.
Examples:
>>> beer_type = ADAPI.get_alexa_intent(data, "beer_type")
>>> all_slots = ADAPI.get_alexa_intent(data)
"""
if "request" in data and "intent" in data["request"] and "slots" in data["request"]["intent"]:
if slot is None:
return data["request"]["intent"]["slots"]
else:
if slot in data["request"]["intent"]["slots"] and "value" in data["request"]["intent"]["slots"][slot]:
return data["request"]["intent"]["slots"][slot]["value"]
else:
return None
else:
return None
#
# API
#
@utils.sync_wrapper
async def register_endpoint(
self, callback: Callable[[Any, dict], Any], endpoint: str = None, **kwargs: Optional[dict]
) -> str:
"""Registers an endpoint for API calls into the current App.
Args:
callback: The function to be called when a request is made to the named endpoint.
endpoint (str, optional): The name of the endpoint to be used for the call (Default: ``None``).
This must be unique across all endpoints, and when not given, the name of the app is used as the endpoint.
It is possible to register multiple endpoints to a single app instance.
Keyword Args:
**kwargs (optional): Zero or more keyword arguments.
Returns:
A handle that can be used to remove the registration.
Examples:
It should be noted that the register function, should return a string (can be empty),
and an HTTP OK status response (e.g., `200`. If this is not added as a returned response,
the function will generate an error each time it is processed.
>>> self.register_endpoint(self.my_callback)
>>> self.register_endpoint(self.alexa_cb, "alexa")
>>> async def alexa_cb(self, request, kwargs):
>>> data = await request.json()
>>> self.log(data)
>>> response = {"message": "Hello World"}
>>> return response, 200
"""
if endpoint is None:
endpoint = self.name
if self.AD.http is not None:
return await self.AD.http.register_endpoint(callback, endpoint, self.name, **kwargs)
else:
self.logger.warning(
"register_endpoint for %s failed - HTTP component is not configured", endpoint,
)
@utils.sync_wrapper
async def deregister_endpoint(self, handle: str) -> None:
"""Removes a previously registered endpoint.
Args:
handle: A handle returned by a previous call to ``register_endpoint``
Returns:
None.
Examples:
>>> self.deregister_endpoint(handle)
"""
await self.AD.http.deregister_endpoint(handle, self.name)
#
# Web Route
#
@utils.sync_wrapper
async def register_route(
self, callback: Callable[[Any, dict], Any], route: str = None, **kwargs: Optional[dict]
) -> str:
"""Registers a route for Web requests into the current App.
By registering an app web route, this allows to make use of AD's internal web server to serve
web clients. All routes registered using this api call, can be accessed using
``http://AD_IP:Port/app/route``.
Args:
callback: The function to be called when a request is made to the named route. This must be an async function
route (str, optional): The name of the route to be used for the request (Default: the app's name).
Keyword Args:
**kwargs (optional): Zero or more keyword arguments.
Returns:
A handle that can be used to remove the registration.
Examples:
It should be noted that the register function, should return a string (can be empty),
and an HTTP OK status response (e.g., `200`. If this is not added as a returned response,
the function will generate an error each time it is processed.
>>> self.register_route(my_callback)
>>> self.register_route(stream_cb, "camera")
"""
if route is None:
route = self.name
if self.AD.http is not None:
return await self.AD.http.register_route(callback, route, self.name, **kwargs)
else:
self.logger.warning("register_route for %s filed - HTTP component is not configured", route)
@utils.sync_wrapper
async def deregister_route(self, handle: str) -> None:
"""Removes a previously registered app route.
Args:
handle: A handle returned by a previous call to ``register_app_route``
Returns:
None.
Examples:
>>> self.deregister_route(handle)
"""
await self.AD.http.deregister_route(handle, self.name)
#
# State
#
@utils.sync_wrapper
async def listen_state(self, callback, entity=None, **kwargs):
    """Registers a callback to react to state changes.

    Args:
        callback: Function invoked when the requested state change occurs; it
            must conform to the standard State Callback format documented
            `here <APPGUIDE.html#state-callbacks>`__.
        entity (str, optional): Entity name or device type. A bare device type
            (e.g., ``light`` or ``binary_sensor``) subscribes to state changes
            for all entities of that type; a fully qualified entity_id
            subscribes to just that entity.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        attribute (str, optional): Attribute within the entity state object to
            watch; requires a fully qualified ``entity_id``. The callback's
            ``new`` and ``old`` parameters then carry that single attribute
            value. The special value ``all`` listens for any state change of
            the entity and supplies the entire state dictionary instead.
        new (optional): Only fire when the selected attribute (usually state)
            in the new state matches this value.
        old (optional): Only fire when the selected attribute (usually state)
            in the old state matches this value.
        duration (int, optional): Only fire once the listened-for state has
            been maintained for this many seconds. Best used on specific
            entities together with ``old`` and/or ``new``; the callback is
            supplied with the ``entity``, ``attr``, ``old`` and ``new`` values
            current at the time the actual event occurred. Using ``duration``
            on a whole device type may give unpredictable results.
        timeout (int, optional): Remove the callback after this many seconds.
            A duration timer already triggered will still fire even though the
            callback has been deleted.
        immediate (bool, optional): Start the countdown (or run the callback)
            immediately at registration time if the entity's current state
            already matches the given parameters. With ``new`` and ``duration``
            both set, the clock starts right away when the entity already has
            the new state; without them the callback runs immediately and
            reports the present state (of ``attribute`` if given) as ``new``.
            ``old`` is ignored and reported as ``None`` in these cases.
        oneshot (bool, optional): If ``True``, cancel the callback
            automatically after the first state change that triggers it.
        namespace (str, optional): Namespace to use for the call; see
            `namespaces <APPGUIDE.html#namespaces>`__. ``global`` listens to
            state updates from any plugin.
        pin (bool, optional): If ``True``, pin the callback to a particular
            thread.
        pin_thread (int, optional): Worker-pool thread that runs the callback
            (0 to number of threads - 1).
        *kwargs (optional): Additional keyword arguments supplied to the
            callback when it is called.

    Notes:
        The ``old`` and ``new`` args can be used singly or together.

    Returns:
        A unique handle usable to cancel the callback. Store it on the app
        object (e.g., ``self.handle``) so it is available for cancellation
        from other methods later.

    Examples:
        Any state change, returning the state attribute.

        >>> self.handle = self.listen_state(self.my_callback)

        Any light, or one specific light.

        >>> self.handle = self.listen_state(self.my_callback, "light")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1")

        Entire state dict, or a single attribute.

        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "all")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "brightness")

        Filtered on new/old values, with and without a duration.

        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "brightness", old = "100", new = "200")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on", duration = 60)
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on", duration = 60, immediate = True)
    """
    # Resolve the namespace first, then strip it so it is not forwarded as a
    # callback filter.
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    # Only fully qualified entity ids can be validated against the registry.
    if entity is not None and "." in entity:
        await self._check_entity(namespace, entity)

    self.logger.debug("Calling listen_state for %s", self.name)
    return await self.AD.state.add_state_callback(self.name, namespace, entity, callback, kwargs)
@utils.sync_wrapper
async def cancel_listen_state(self, handle):
    """Cancels a ``listen_state()`` callback.

    After cancellation the app is no longer notified for that specific state
    change; any other registered state callbacks keep working.

    Args:
        handle: The handle returned when the ``listen_state()`` call was made.

    Returns:
        Boolean.

    Examples:
        >>> self.cancel_listen_state(self.office_light_handle)
    """
    self.logger.debug("Canceling listen_state for %s", self.name)
    cancelled = await self.AD.state.cancel_state_callback(handle, self.name)
    return cancelled
@utils.sync_wrapper
async def info_listen_state(self, handle):
    """Gets information on a state callback from its handle.

    Args:
        handle: The handle returned when the ``listen_state()`` call was made.

    Returns:
        The values supplied for ``entity``, ``attribute``, and ``kwargs`` when
        the callback was initially created.

    Examples:
        >>> entity, attribute, kwargs = self.info_listen_state(self.handle)
    """
    self.logger.debug("Calling info_listen_state for %s", self.name)
    info = await self.AD.state.info_state_callback(handle, self.name)
    return info
@utils.sync_wrapper
async def get_state(self, entity_id=None, attribute=None, default=None, copy=True, **kwargs):
    """Gets the state of any component within Home Assistant.

    State updates are tracked continuously, so this call runs locally against
    AppDaemon's push-updated state store and does not call back to Home
    Assistant.

    Args:
        entity_id (str, optional): Entity name or device type. A bare device
            type (e.g., ``light`` or ``binary_sensor``) returns a dictionary
            of all devices of that type, indexed by ``entity_id``, each with
            its full state. A fully qualified ``entity_id`` returns that
            entity's state attribute (e.g., ``on`` or ``off`` for a light).
        attribute (str, optional): Attribute within the entity state object to
            return; requires a fully qualified ``entity_id``. The special
            value ``all`` returns the entity's entire state dictionary rather
            than a single attribute value.
        default (any, optional): Value returned when the requested attribute
            or the whole entity doesn't exist (Default: ``None``).
        copy (bool, optional): By default a copy of the stored state object is
            returned. Passing ``False`` returns AppDaemon's internal object
            instead — a small performance gain, but it grants write access to
            internal data structures, which is dangerous. Only disable copying
            for guaranteed read-only use.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__ for a
            detailed description. In most cases it is safe to ignore this
            parameter.

    Returns:
        With no parameters, the entire state of Home Assistant at that time:
        a dictionary keyed by entity, each holding the standard entity state
        information.

    Examples:
        >>> state = self.get_state()
        >>> state = self.get_state("switch")
        >>> state = self.get_state("light.office_1")
        >>> state = self.get_state("light.office_1", attribute="brightness")
        >>> state = self.get_state("light.office_1", attribute="all")
    """
    # Resolve, then strip, the namespace so it is not forwarded downstream.
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    return await self.AD.state.get_state(self.name, namespace, entity_id, attribute, default, copy, **kwargs)
@utils.sync_wrapper
async def set_state(self, entity, **kwargs):
    """Updates the state of the specified entity.

    Args:
        entity (str): The fully qualified entity id (including the device
            type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        state: New state value to be set.
        attributes (optional): Entity's attributes to be updated.
        namespace(str, optional): If provided, AppDaemon changes the entity's
            state in that namespace; otherwise the last specified or default
            namespace is used. See `namespaces <APPGUIDE.html#namespaces>`__.
            In most cases it is safe to ignore this parameter.
        replace(bool, optional): If ``True`` and ``attributes`` is provided,
            AD replaces its internal entity register with the newly supplied
            attributes completely, which can drop attributes that are no
            longer needed. Only meaningful for internal entity state; not
            recommended for plugin-based entities, as the plugin will mostly
            overwrite the values on its next update.

    Returns:
        A dictionary that represents the new state of the updated entity.

    Examples:
        >>> self.set_state("light.office_1", state="off")
        >>> self.set_state("light.office_1", state = "on", attributes = {"color_name": "red"})
        >>> self.set_state("light.office_1", state="off", namespace ="hass")
    """
    # Log before stripping "namespace" so the debug line shows the full call.
    self.logger.debug("set state: %s, %s", entity, kwargs)
    namespace = self._get_namespace(**kwargs)
    await self._check_entity(namespace, entity)
    kwargs.pop("namespace", None)

    return await self.AD.state.set_state(self.name, namespace, entity, **kwargs)
#
# Service
#
@staticmethod
def _check_service(service: str) -> None:
    """Validate that a service name is in ``domain/service`` form.

    Raises:
        ValueError: If *service* contains no ``/`` separator.
    """
    if "/" not in service:
        raise ValueError("Invalid Service Name: {}".format(service))
def register_service(
    self, service: str, cb: Callable[[str, str, str, dict], Any], **kwargs: Optional[dict]
) -> None:
    """Registers a service callable from other apps, the REST API and the Event Stream.

    The registered function is published in the service registry, making it
    available to other apps via ``call_service()``, as a service in the REST
    API, and to the `call_service` command in the event stream.

    Args:
        service: Name of the service in the format `domain/service`. If the
            domain does not exist it will be created.
        cb: Function called when the service is requested. May be a regular
            or an async function; note that async callbacks run on
            AppDaemon's main loop, so a misbehaving service could delay AD's
            core functions.

    Keyword Args:
        namespace(str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__ for a
            detailed description. In most cases it is safe to ignore this
            parameter.

    Returns:
        None

    Examples:
        >>> self.register_service("myservices/service1", mycallback)
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("register_service: %s/%s, %s", domain, service_name, kwargs)

    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    # Tag the registration with the owning app's name.
    kwargs["__name"] = self.name
    self.AD.services.register_service(namespace, domain, service_name, cb, __async="auto", **kwargs)
def deregister_service(self, service: str, **kwargs: Optional[dict]) -> bool:
    """Deregisters a service that had been previously registered.

    The service is removed from the registry, making it unavailable to other
    apps via ``call_service()``, to the REST API, and to the `call_service`
    command in the event stream. Only the app that registered the service in
    the first place may deregister it.

    Args:
        service: Name of the service, in the format `domain/service`.

    Keyword Args:
        namespace(str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__ for a
            detailed description. In most cases it is safe to ignore this
            parameter.

    Returns:
        Bool

    Examples:
        >>> self.deregister_service("myservices/service1")
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("deregister_service: %s/%s, %s", domain, service_name, kwargs)

    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    # Identify the calling app; only the registering app may deregister.
    kwargs["__name"] = self.name
    return self.AD.services.deregister_service(namespace, domain, service_name, **kwargs)
def list_services(self, **kwargs: Optional[dict]) -> list:
    """List all services available within AD.

    Args:
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): If provided, only services within the given
            namespace are returned; pass `global` (the default) to get all
            services across AD. See `namespaces <APPGUIDE.html#namespaces>`__
            for a detailed description. In most cases it is safe to ignore
            this parameter.

    Returns:
        All services within the requested namespace.

    Examples:
        >>> self.list_services(namespace="global")
    """
    self.logger.debug("list_services: %s", kwargs)
    # Default to the special "global" namespace, i.e. every service in AD.
    return self.AD.services.list_services(kwargs.get("namespace", "global"))
@utils.sync_wrapper
async def call_service(self, service: str, **kwargs: Optional[dict]) -> Any:
    """Calls a Service within AppDaemon.

    This function can call any service and provide any required parameters.
    Besides AD's standard services, the callable services depend on the
    plugin in use and on services registered by individual apps via the
    `register_service` api. For `listed services`, the part before the first
    period is the ``domain`` and the part after is the ``service name`` —
    `light/turn_on` has domain `light` and service name `turn_on`.

    Args:
        service (str): The service name.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        **kwargs: Each service has different parameter requirements; supply
            them as keyword value pairs, e.g., `entity_id = light.office_1`.
            Parameters differ per service and can be discovered using the
            developer tools; most service calls require an ``entity_id``.
        namespace(str, optional): If provided, AppDaemon acts within the given
            namespace; otherwise the last specified or default namespace is
            used. See `namespaces <APPGUIDE.html#namespaces>`__ for a detailed
            description. In most cases it is safe to ignore this parameter.

    Returns:
        Result of the `call_service` function, if any.

    Examples:
        HASS

        >>> self.call_service("light/turn_on", entity_id = "light.office_lamp", color_name = "red")
        >>> self.call_service("notify/notify", title = "Hello", message = "Hello World")

        MQTT

        >>> call_service("mqtt/subscribe", topic="homeassistant/living_room/light", qos=2)
        >>> call_service("mqtt/publish", topic="homeassistant/living_room/light", payload="on")

        Utility

        >>> call_service("app/restart", app="notify_app", namespace="appdaemon")
        >>> call_service("app/stop", app="lights_app", namespace="appdaemon")
        >>> call_service("app/reload", namespace="appdaemon")

        For Utility, the `namespace` arg must be set to ``appdaemon``, as no
        app can work within that `namespace`. If no namespace is specified,
        calling this function will raise an error.
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("call_service: %s/%s, %s", domain, service_name, kwargs)

    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    # Identify the calling app; note kwargs is passed as a single dict here.
    kwargs["__name"] = self.name
    return await self.AD.services.call_service(namespace, domain, service_name, kwargs)
@utils.sync_wrapper
async def run_sequence(self, sequence, **kwargs):
    """Run an AppDaemon Sequence.

    Sequences are sequences of service calls, defined in a valid apps.yaml
    file or inline.

    Args:
        sequence: The sequence name, referring to the correct entry in
            apps.yaml, or a dict containing actual commands to run.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): If provided, AppDaemon acts within the given
            namespace; otherwise the last specified or default namespace is
            used. See `namespaces <APPGUIDE.html#namespaces>`__ for a detailed
            description. In most cases it is safe to ignore this parameter.

    Returns:
        A handle that can be used with `cancel_sequence()` to terminate the
        script.

    Examples:
        Run a yaml-defined sequence called "sequence.front_room_scene".

        >>> handle = self.run_sequence("sequence.front_room_scene")

        Run an inline sequence.

        >>> handle = self.run_sequence([{"light/turn_on": {"entity_id": "light.office_1"}}, {"sleep": 5}, {"light.turn_off":
        {"entity_id": "light.office_1"}}])
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    self.logger.debug("Calling run_sequence() for %s", self.name)
    return await self.AD.sequences.run_sequence(self.name, namespace, sequence, **kwargs)
@utils.sync_wrapper
async def cancel_sequence(self, handle):
    """Cancel an AppDaemon Sequence.

    Args:
        handle: The handle returned by the `run_sequence()` call.

    Returns:
        None.

    Examples:
        >>> self.cancel_sequence(handle)
    """
    _name = self.name
    # Bug fix: the debug message previously referenced run_sequence().
    self.logger.debug("Calling cancel_sequence() for %s", self.name)
    await self.AD.sequences.cancel_sequence(_name, handle)
#
# Events
#
@utils.sync_wrapper
async def listen_event(self, callback, event=None, **kwargs):
    """Registers a callback for a specific event, or any event.

    Args:
        callback: Function invoked when the event is fired; it must conform to
            the standard Event Callback format documented
            `here <APPGUIDE.html#about-event-callbacks>`__.
        event (optional): Name of the event to subscribe to. Can be a standard
            Home Assistant event such as `service_registered` or an arbitrary
            custom event such as `"MODE_CHANGE"`. With no event specified,
            `listen_event()` subscribes to all events.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        oneshot (bool, optional): If ``True``, the callback is automatically
            cancelled after the first state change that results in a callback.
        namespace(str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__ for a
            detailed description. In most cases it is safe to ignore this
            parameter. The value ``global`` has special significance and
            means the callback will listen to state updates from any plugin.
        pin (bool, optional): If ``True``, pin the callback to a particular
            thread.
        pin_thread (int, optional): Worker-pool thread that runs the callback
            (0 to number of threads - 1).
        timeout (int, optional): Remove the callback after this many seconds.
        **kwargs (optional): App-specific keyword value pairs supplied to the
            callback. Keywords matching keys within the event data act as
            filters: if the values don't match, the callback does not fire.
            For example, a `Minimote` controller generates a
            zwave.scene_activated event carrying entity_id and scene; keyword
            values given for either must match the event's values or it will
            not fire. Keywords matching no event data are simply ignored.
            Filtering works with any event type, but the data associated with
            the event must be known to pick filter values — examine Home
            Assistant's `logfiles` when the event fires.

    Returns:
        A handle that can be used to cancel the callback.

    Examples:
        Listen all `"MODE_CHANGE"` events.

        >>> self.listen_event(self.mode_event, "MODE_CHANGE")

        Listen for a `minimote` event activating scene 3.

        >>> self.listen_event(self.generic_event, "zwave.scene_activated", scene_id = 3)

        Listen for a `minimote` event activating scene 3 from a specific `minimote`.

        >>> self.listen_event(self.generic_event, "zwave.scene_activated", entity_id = "minimote_31", scene_id = 3)
    """
    # Resolve the namespace up front and strip it from the filter kwargs.
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    self.logger.debug("Calling listen_event for %s", self.name)
    return await self.AD.events.add_event_callback(self.name, namespace, callback, event, **kwargs)
@utils.sync_wrapper
async def cancel_listen_event(self, handle):
    """Cancels a callback for a specific event.

    Args:
        handle: A handle returned from a previous call to ``listen_event()``.

    Returns:
        Boolean.

    Examples:
        >>> self.cancel_listen_event(handle)
    """
    self.logger.debug("Canceling listen_event for %s", self.name)
    cancelled = await self.AD.events.cancel_event_callback(self.name, handle)
    return cancelled
@utils.sync_wrapper
async def info_listen_event(self, handle):
    """Gets information on an event callback from its handle.

    Args:
        handle: The handle returned when the ``listen_event()`` call was made.

    Returns:
        The values (service, kwargs) supplied when the callback was initially
        created.

    Examples:
        >>> service, kwargs = self.info_listen_event(handle)
    """
    self.logger.debug("Calling info_listen_event for %s", self.name)
    info = await self.AD.events.info_event_callback(self.name, handle)
    return info
@utils.sync_wrapper
async def fire_event(self, event, **kwargs):
    """Fires an event on the AppDaemon bus, for apps and plugins.

    Args:
        event: Name of the event. Can be a standard Home Assistant event such
            as `service_registered` or an arbitrary custom event such as
            "MODE_CHANGE".
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace(str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__ for a
            detailed description. In most cases it is safe to ignore this
            parameter.
        **kwargs (optional): Zero or more keyword arguments supplied as part
            of the event.

    Returns:
        None.

    Examples:
        >>> self.fire_event("MY_CUSTOM_EVENT", jam="true")
    """
    # Resolve the target namespace, then drop it from the event payload.
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)

    await self.AD.events.fire_event(namespace, event, **kwargs)
#
# Time
#
def parse_utc_string(self, utc_string):
    """Converts a UTC string to its POSIX timestamp representation.

    Args:
        utc_string (str): A string that contains a date and time to convert.

    Returns:
        A POSIX timestamp equivalent to the date and time contained in
        `utc_string`, shifted by the local timezone offset.
    """
    # Split the string on every non-digit run; the trailing element produced
    # by the split is discarded.
    fields = [int(part) for part in re.split(r"[^\d]", utc_string)[:-1]]
    # NOTE(review): the offset is added as get_tz_offset() * 60 (minutes
    # converted to seconds) — preserved as-is from the original.
    return datetime.datetime(*fields).timestamp() + self.get_tz_offset() * 60
def get_tz_offset(self):
    """Returns the timezone difference between UTC and Local Time in minutes."""
    offset = self.AD.tz.utcoffset(self.datetime())
    return offset.total_seconds() / 60
@staticmethod
def convert_utc(utc):
    """Gets a `datetime` object for the specified UTC.

    Home Assistant provides timestamps of several different sorts that may be
    used to gain additional insight into state changes. These timestamps are
    in UTC and are coded as `ISO 8601` combined date and time strings. This
    function accepts one of these strings and converts it to a localised
    Python `datetime` object representing the timestamp.

    Args:
        utc: An `ISO 8601` encoded date and time string in the following
            format: `2016-07-13T14:24:02.040658-04:00`

    Returns:
        A localised Python `datetime` object representing the timestamp.
    """
    parsed = iso8601.parse_date(utc)
    return parsed
@utils.sync_wrapper
async def sun_up(self):
    """Determines if the sun is currently up.

    Returns:
        bool: ``True`` if the sun is up, ``False`` otherwise.

    Examples:
        >>> if self.sun_up():
        >>>    #do something
    """
    # Defer to the scheduler's astronomical calculations.
    result = await self.AD.sched.sun_up()
    return result
@utils.sync_wrapper
async def sun_down(self):
    """Determines if the sun is currently down.

    Returns:
        bool: ``True`` if the sun is down, ``False`` otherwise.

    Examples:
        >>> if self.sun_down():
        >>>    #do something
    """
    # Defer to the scheduler's astronomical calculations.
    result = await self.AD.sched.sun_down()
    return result
@utils.sync_wrapper
async def parse_time(self, time_str, name=None, aware=False):
    """Creates a `time` object from its string representation.

    Takes a string representation of a time, or a sunrise or sunset offset,
    and converts it to a datetime.time object.

    Args:
        time_str (str): A representation of the time in a string format with
            one of the following formats:

            a. ``HH:MM:SS`` - the time in Hours Minutes and Seconds, 24 hour
               format.
            b. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise or
               sunset with an optional positive or negative offset in Hours
               Minutes and seconds.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.
        aware (bool, optional): If ``True`` the created time object will be
            aware of timezone.

    Returns:
        A `time` object, representing the time given in the `time_str`
        argument.

    Examples:
        >>> self.parse_time("17:30:00")
        17:30:00

        >>> time = self.parse_time("sunrise")
        04:33:17

        >>> time = self.parse_time("sunset + 00:30:00")
        19:18:48

        >>> time = self.parse_time("sunrise + 01:00:00")
        05:33:17
    """
    # All parsing (including sunrise/sunset arithmetic) lives in the scheduler.
    parsed = await self.AD.sched.parse_time(time_str, name, aware)
    return parsed
@utils.sync_wrapper
async def parse_datetime(self, time_str, name=None, aware=False):
    """Creates a `datetime` object from its string representation.

    Takes a string representation of a date and time, or a sunrise or sunset
    offset, and converts it to a `datetime` object.

    Args:
        time_str (str): A string representation of the datetime with one of
            the following formats:

            a. ``YY-MM-DD-HH:MM:SS`` - the date and time in Year, Month, Day,
               Hours, Minutes, and Seconds, 24 hour format.
            b. ``HH:MM:SS`` - the time in Hours Minutes and Seconds, 24 hour
               format; the resulting datetime object will have today's date.
            c. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise or
               sunset with an optional positive or negative offset in Hours
               Minutes and seconds.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.
        aware (bool, optional): If ``True`` the created datetime object will
            be aware of timezone.

    Returns:
        A `datetime` object, representing the time and date given in the
        `time_str` argument.

    Examples:
        >>> self.parse_datetime("2018-08-09 17:30:00")
        2018-08-09 17:30:00

        >>> self.parse_datetime("17:30:00")
        2019-08-15 17:30:00

        >>> self.parse_datetime("sunrise")
        2019-08-16 05:33:17

        >>> self.parse_datetime("sunset + 00:30:00")
        2019-08-16 19:18:48

        >>> self.parse_datetime("sunrise + 01:00:00")
        2019-08-16 06:33:17
    """
    # All parsing (including sunrise/sunset arithmetic) lives in the scheduler.
    parsed = await self.AD.sched.parse_datetime(time_str, name, aware)
    return parsed
@utils.sync_wrapper
async def get_now(self):
    """Returns the current Local Date and Time.

    Examples:
        >>> self.get_now()
        2019-08-16 21:17:41.098813+00:00
    """
    # The scheduler's clock is converted to AD's configured timezone.
    return (await self.AD.sched.get_now()).astimezone(self.AD.tz)
@utils.sync_wrapper
async def get_now_ts(self):
    """Returns the current Local Timestamp.

    Examples:
        >>> self.get_now_ts()
        1565990318.728324
    """
    ts = await self.AD.sched.get_now_ts()
    return ts
@utils.sync_wrapper
async def now_is_between(self, start_time, end_time, name=None):
    """Determines if the current `time` is within the specified start and end times.

    Takes two string representations of a ``time``, or ``sunrise`` or
    ``sunset`` offsets, and returns ``true`` if the current time is between
    those two times. The implementation correctly handles transitions across
    midnight.

    Args:
        start_time (str): A string representation of the start time.
        end_time (str): A string representation of the end time.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.

    Returns:
        bool: ``True`` if the current time is within the specified start and
        end times, ``False`` otherwise.

    Notes:
        The string representation of the ``start_time`` and ``end_time``
        should follow one of these formats:

        a. ``HH:MM:SS`` - the time in Hours Minutes and Seconds, 24 hour
           format.
        b. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise or
           sunset with an optional positive or negative offset in Hours,
           Minutes, and Seconds.

    Examples:
        >>> if self.now_is_between("17:30:00", "08:00:00"):
        >>>     #do something

        >>> if self.now_is_between("sunset - 00:45:00", "sunrise + 00:45:00"):
        >>>     #do something
    """
    # The scheduler owns the comparison, including the cross-midnight logic.
    result = await self.AD.sched.now_is_between(start_time, end_time, name)
    return result
@utils.sync_wrapper
async def sunrise(self, aware=False):
    """Return a `datetime` for the next time sunrise will occur.

    Args:
        aware (bool, optional): When ``True`` the result is timezone-aware.
    """
    return await self.AD.sched.sunrise(aware)
@utils.sync_wrapper
async def sunset(self, aware=False):
    """Return a `datetime` for the next time sunset will occur.

    Args:
        aware (bool, optional): When ``True`` the result is timezone-aware.
    """
    return await self.AD.sched.sunset(aware)
@utils.sync_wrapper
async def time(self):
    """Return the current local time as a `time` object.

    Prefer this over the standard library, especially when the
    "Time Travel" feature is in use, since it reflects the
    scheduler's notion of "now".
    """
    current = await self.AD.sched.get_now()
    return current.astimezone(self.AD.tz).time()
@utils.sync_wrapper
async def datetime(self, aware=False):
    """Return the current local date and time as a `datetime` object.

    Prefer this over the standard library, especially when the
    "Time Travel" feature is in use.

    Args:
        aware (bool, optional): When exactly ``True`` the result is
            timezone-aware; any other value yields a naive datetime.
    """
    # Note: deliberately an identity check against True, preserving the
    # original's behavior for truthy-but-not-True values.
    if aware is True:
        current = await self.AD.sched.get_now()
        return current.astimezone(self.AD.tz)
    else:
        return await self.AD.sched.get_now_naive()
@utils.sync_wrapper
async def date(self):
    """Return the current local date as a `date` object.

    Prefer this over the standard library, especially when the
    "Time Travel" feature is in use.
    """
    current = await self.AD.sched.get_now()
    return current.astimezone(self.AD.tz).date()
def get_timezone(self):
    """Return the configured time zone."""
    return self.AD.time_zone
#
# Scheduler
#
@utils.sync_wrapper
async def timer_running(self, handle):
    """Report whether the timer behind ``handle`` is still scheduled.

    Args:
        handle: Handle returned when the timer was created.

    Returns:
        bool: ``True`` when the timer still exists.

    Examples:
        >>> self.timer_running(handle)
    """
    self.logger.debug("Checking timer with handle %s for %s", handle, self.name)
    # The scheduler call is not awaited here, matching existing behavior.
    return self.AD.sched.timer_running(self.name, handle)
@utils.sync_wrapper
async def cancel_timer(self, handle):
    """Cancel a previously created timer.

    Args:
        handle: Handle returned when the timer was created.

    Returns:
        bool: result of the scheduler's cancellation attempt.

    Examples:
        >>> self.cancel_timer(handle)
    """
    self.logger.debug("Canceling timer with handle %s for %s", handle, self.name)
    return await self.AD.sched.cancel_timer(self.name, handle)
@utils.sync_wrapper
async def info_timer(self, handle):
    """Look up details of a scheduled event from its handle.

    Args:
        handle: Handle returned when the scheduler call was made.

    Returns:
        A ``(time, interval, kwargs)`` tuple — next fire time as a datetime,
        repeat interval (``0`` for one-shot timers), and the kwargs supplied
        at creation — or ``None`` when the handle is invalid or the timer no
        longer exists.

    Examples:
        >>> time, interval, kwargs = self.info_timer(handle)
    """
    return await self.AD.sched.info_timer(handle, self.name)
@utils.sync_wrapper
async def run_in(self, callback, delay, **kwargs):
    """Schedule ``callback`` to run after ``delay`` seconds.

    Use this instead of ``time.sleep()`` to add a delay, e.g. turning a
    light off 60 seconds after a motion detector fires.

    Args:
        callback: Scheduler-style callback to invoke.
        delay (int): Seconds to wait before the callback is invoked.
        **kwargs (optional): ``random_start``/``random_end`` random-offset
            range (start must be numerically lower than end; both may be
            negative), ``pin``/``pin_thread`` thread control, plus arbitrary
            keyword parameters passed through to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Examples:
        >>> self.handle = self.run_in(self.run_in_c, 10)
        >>> self.handle = self.run_in(self.run_in_c, 5, title = "run_in5")
    """
    self.logger.debug("Registering run_in in %s seconds for %s", delay, self.name)
    # delay commonly arrives as a string from the config file, so coerce it.
    when = await self.get_now() + timedelta(seconds=int(delay))
    return await self.AD.sched.insert_schedule(self.name, when, callback, False, None, **kwargs)
@utils.sync_wrapper
async def run_once(self, callback, start, **kwargs):
    """Run ``callback`` once at the given time of day.

    Args:
        callback: Scheduler-style callback to invoke.
        start: A ``datetime.time`` object or a ``parse_time()`` style string
            (``HH:MM:SS`` or ``sunrise``/``sunset`` with optional offset).
            A time already in the past fires at the same time tomorrow.
        **kwargs (optional): ``random_start``/``random_end`` random-offset
            range, ``pin``/``pin_thread`` thread control, plus arbitrary
            keyword parameters passed through to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is neither a ``time`` object nor a string.

    Examples:
        >>> handle = self.run_once(self.run_once_c, datetime.time(16, 0, 0))
        >>> handle = self.run_once(self.run_once_c, "10:30:00")
        >>> handle = self.run_once(self.run_once_c, "sunrise + 01:00:00")
    """
    # isinstance() (rather than the previous exact type() == check) also
    # accepts subclasses of datetime.time and str, which callers may pass.
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"].time()
    else:
        raise ValueError("Invalid type for start")
    self.logger.debug("Registering run_once at %s for %s", when, self.name)
    now = await self.get_now()
    event = datetime.datetime.combine(now.date(), when)
    aware_event = self.AD.sched.convert_naive(event)
    # Already passed today -> schedule for the same time tomorrow.
    if aware_event < now:
        aware_event = aware_event + datetime.timedelta(days=1)
    return await self.AD.sched.insert_schedule(self.name, aware_event, callback, False, None, **kwargs)
@utils.sync_wrapper
async def run_at(self, callback, start, **kwargs):
    """Run ``callback`` once at a specific date and time.

    Args:
        callback: Scheduler-style callback to invoke.
        start: A ``datetime.datetime`` object or a ``parse_time()`` style
            string (``HH:MM:SS``, ``YY-MM-DD-HH:MM:SS``, or
            ``sunrise``/``sunset`` with optional offset).
        **kwargs (optional): ``random_start``/``random_end`` random-offset
            range, ``pin``/``pin_thread`` thread control, plus arbitrary
            keyword parameters passed through to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` has the wrong type, or the requested time
            is in the past.

    Examples:
        >>> handle = self.run_at(self.run_at_c, "2018-12-11 10:30:00")
        >>> handle = self.run_at(self.run_at_c, "sunrise + 01:00:00")
    """
    # isinstance() (rather than the previous exact type() == check) also
    # accepts subclasses of datetime.datetime and str.
    if isinstance(start, datetime.datetime):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"]
    else:
        raise ValueError("Invalid type for start")
    aware_when = self.AD.sched.convert_naive(when)
    self.logger.debug("Registering run_at at %s for %s", when, self.name)
    now = await self.get_now()
    # Unlike run_once(), a past time is an error rather than "tomorrow".
    if aware_when < now:
        raise ValueError("{}: run_at() Start time must be in the future".format(self.name))
    return await self.AD.sched.insert_schedule(self.name, aware_when, callback, False, None, **kwargs)
@utils.sync_wrapper
async def run_daily(self, callback, start, **kwargs):
    """Run ``callback`` every day at the given time.

    Args:
        callback: Scheduler-style callback to invoke.
        start: A ``datetime.time`` object or a ``parse_time()`` style string.
            ``sunrise``/``sunset`` strings track the actual sunrise/sunset
            each day; a plain time in the past fires tomorrow first.
        **kwargs (optional): ``random_start``/``random_end`` random-offset
            range, ``pin``/``pin_thread`` thread control, plus arbitrary
            keyword parameters passed through to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is neither a ``time`` object nor a string.

    Examples:
        >>> self.run_daily(self.run_daily_c, datetime.time(19, 0, 0))
        >>> handle = self.run_daily(self.run_daily_c, "10:30:00")
        >>> handle = self.run_daily(self.run_daily_c, "sunset + 01:00:00")
    """
    info = None
    when = None
    # isinstance() (rather than the previous exact type() == check) also
    # accepts subclasses of datetime.time and str.
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        info = await self.AD.sched._parse_time(start, self.name)
    else:
        raise ValueError("Invalid type for start")
    if info is None or info["sun"] is None:
        # Plain time of day: compute the first occurrence, then repeat daily.
        if when is None:
            when = info["datetime"].time()
        aware_now = await self.get_now()
        now = self.AD.sched.make_naive(aware_now)
        event = datetime.datetime.combine(now.date(), when)
        if event < now:
            event = event + datetime.timedelta(days=1)
        handle = await self.run_every(callback, event, 24 * 60 * 60, **kwargs)
    elif info["sun"] == "sunrise":
        # Sun-relative schedule: delegate so the time tracks actual sunrise.
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunrise(callback, **kwargs)
    else:
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunset(callback, **kwargs)
    return handle
@utils.sync_wrapper
async def run_hourly(self, callback, start, **kwargs):
    """Run ``callback`` once per hour.

    Args:
        callback: Scheduler-style callback to invoke.
        start: A ``time`` object whose minute/second fix the firing point
            within each hour (the hour component is ignored); ``None``
            starts one hour from now.
        **kwargs (optional): ``random_start``/``random_end`` random-offset
            range, ``pin``/``pin_thread`` thread control, plus arbitrary
            keyword parameters passed through to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Examples:
        >>> self.run_hourly(self.run_hourly_c, datetime.time(0, 0, 0))
    """
    now = await self.get_now()
    if start is None:
        first = now + datetime.timedelta(hours=1)
    else:
        first = now.replace(minute=start.minute, second=start.second)
        # If that moment already passed this hour, begin next hour.
        if first < now:
            first = first + datetime.timedelta(hours=1)
    return await self.run_every(callback, first, 60 * 60, **kwargs)
@utils.sync_wrapper
async def run_minutely(self, callback, start, **kwargs):
    """Run ``callback`` once per minute.

    Args:
        callback: Scheduler-style callback to invoke.
        start: A ``time`` object whose second fixes the firing point within
            each minute (hour and minute are ignored); ``None`` starts one
            minute from now.
        **kwargs (optional): ``random_start``/``random_end`` random-offset
            range, ``pin``/``pin_thread`` thread control, plus arbitrary
            keyword parameters passed through to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Examples:
        >>> self.run_minutely(self.run_minutely_c, datetime.time(0, 0, 0))
    """
    now = await self.get_now()
    if start is None:
        first = now + datetime.timedelta(minutes=1)
    else:
        first = now.replace(second=start.second)
        # If that moment already passed this minute, begin next minute.
        if first < now:
            first = first + datetime.timedelta(minutes=1)
    return await self.run_every(callback, first, 60, **kwargs)
@utils.sync_wrapper
async def run_every(self, callback, start, interval, **kwargs):
    """Run ``callback`` repeatedly at a fixed interval.

    Args:
        callback: Scheduler-style callback to invoke.
        start: A ``datetime`` for the first run, or the string ``"now"``
            (optionally ``"now+<seconds>"``) for an immediate start.
            A start in the past begins one interval from now.
        interval: Seconds between invocations.
        **kwargs: ``random_start``/``random_end`` random-offset range,
            ``pin``/``pin_thread`` thread control, plus arbitrary keyword
            parameters passed through to the callback.

    Returns:
        A handle that can be used to cancel the timer.

    Examples:
        >>> self.run_every(self.run_every_c, time, 17 * 60)
        >>> self.run_every(self.run_every_c, "now", 10 * 60)
        >>> self.run_every(self.run_every_c, "now+5", 5 * 60)
    """
    now = await self.get_now()
    if isinstance(start, str) and "now" in start:
        # "now" or "now+<seconds>": start immediately plus an optional offset.
        offset_seconds = int(re.findall(r"\d+", start)[0]) if "+" in start else 0
        first = await self.get_now() + datetime.timedelta(seconds=offset_seconds)
    else:
        first = self.AD.sched.convert_naive(start)
    # A start in the past means "begin one interval from now".
    if first < now:
        first = now + datetime.timedelta(seconds=interval)
    self.logger.debug(
        "Registering run_every starting %s in %ss intervals for %s", first, interval, self.name,
    )
    return await self.AD.sched.insert_schedule(
        self.name, first, callback, True, None, interval=interval, **kwargs
    )
@utils.sync_wrapper
async def _schedule_sun(self, name, type_, callback, **kwargs):
    # Internal helper: register a repeating schedule anchored to the next
    # sunrise ("next_rising") or, for any other type_, the next sunset.
    if type_ == "next_rising":
        event = self.AD.sched.next_sunrise()
    else:
        event = self.AD.sched.next_sunset()
    return await self.AD.sched.insert_schedule(name, event, callback, True, type_, **kwargs)
@utils.sync_wrapper
async def run_at_sunset(self, callback, **kwargs):
    """Run ``callback`` every day at or around sunset.

    Args:
        callback: Scheduler-style callback to invoke.
        **kwargs: Arbitrary keyword parameters passed through to the callback.

    Keyword Args:
        offset (int, optional): Seconds relative to sunset (negative fires
            before sunset). Cannot be combined with ``random_start`` or
            ``random_end``.
        random_start (int): Start of the random-offset range (must be
            numerically lower than ``random_end``; may be negative).
        random_end (int): End of the random-offset range.
        pin (bool, optional): Pin the callback to a particular thread.
        pin_thread (int, optional): Worker thread to run the callback on.

    Returns:
        A handle that can be used to cancel the timer.

    Examples:
        >>> self.run_at_sunset(self.sun, offset = datetime.timedelta(minutes = -45).total_seconds())
        >>> self.run_at_sunset(self.sun, offset = 30 * 60)
        >>> self.run_at_sunset(self.sun, random_start = -60*60, random_end = 60*60)
    """
    self.logger.debug("Registering run_at_sunset with kwargs = %s for %s", kwargs, self.name)
    return await self._schedule_sun(self.name, "next_setting", callback, **kwargs)
@utils.sync_wrapper
async def run_at_sunrise(self, callback, **kwargs):
    """Run ``callback`` every day at or around sunrise.

    Args:
        callback: Scheduler-style callback to invoke.
        **kwargs: Arbitrary keyword parameters passed through to the callback.

    Keyword Args:
        offset (int, optional): Seconds relative to sunrise (negative fires
            before sunrise). Cannot be combined with ``random_start`` or
            ``random_end``.
        random_start (int): Start of the random-offset range (must be
            numerically lower than ``random_end``; may be negative).
        random_end (int): End of the random-offset range.
        pin (bool, optional): Pin the callback to a particular thread.
        pin_thread (int, optional): Worker thread to run the callback on.

    Returns:
        A handle that can be used to cancel the timer.

    Examples:
        >>> self.run_at_sunrise(self.sun, offset = 30 * 60)
        >>> self.run_at_sunrise(self.sun, random_start = -60*60, random_end = 60*60)
    """
    self.logger.debug("Registering run_at_sunrise with kwargs = %s for %s", kwargs, self.name)
    return await self._schedule_sun(self.name, "next_rising", callback, **kwargs)
#
# Dashboard
#
def dash_navigate(self, target, timeout=-1, ret=None, sticky=0, deviceid=None, dashid=None):
    """Force all connected Dashboards to navigate to a new panel.

    Args:
        target (str): Dashboard name to navigate to (e.g. ``/SensorPanel``);
            this is not a URL.
        timeout (int): Seconds to stay on the new dashboard before returning;
            ``-1`` (the default) makes the change permanent. A click or touch
            before the timeout expires cancels it.
        ret (str): Dashboard to return to once the timeout elapses.
        sticky (int): ``0`` keeps the new dashboard if clicked, otherwise
            returns to the original; a positive value extends the stay by that
            many seconds per click and returns after ``timeout`` of inactivity.
        deviceid (str): Restrict navigation to the device with this id.
        dashid (str): Restrict navigation to devices whose current dashboard
            title contains this substring.

    Returns:
        None.

    Examples:
        >>> self.dash_navigate("/AlarmStatus", timeout=10)
        >>> self.dash_navigate("/Locks", timeout=10, ret="/SensorPanel")
    """
    event_args = {"command": "navigate", "target": target, "sticky": sticky}
    # Only forward optional settings that were actually supplied.
    if timeout != -1:
        event_args["timeout"] = timeout
    if ret is not None:
        event_args["return"] = ret
    if deviceid is not None:
        event_args["deviceid"] = deviceid
    if dashid is not None:
        event_args["dashid"] = dashid
    self.fire_event("ad_dashboard", **event_args)
#
# Async
#
async def run_in_executor(self, func, *args, **kwargs):
    """Run a sync function from async code on an executor thread.

    The call is awaited, so execution resumes once ``func`` completes.

    Args:
        func: The sync function to execute.
        *args (optional): Positional arguments for ``func``.
        **kwargs (optional): Keyword arguments for ``func``.

    Returns:
        The value returned by ``func``.

    Examples:
        >>> await self.run_in_executor(self.run_request)
    """
    return await utils.run_in_executor(self, func, *args, **kwargs)
def submit_to_executor(self, func, *args, **kwargs):
    """Submit a sync function, from within another sync function, to run on an Executor thread.

    The call is not awaited: the future is submitted and the caller continues
    immediately. Useful for long-running work that should not hold up the
    thread for other callbacks.

    Args:
        func: The function to be executed.
        *args (optional): Any additional arguments to be used by the function.
        **kwargs (optional): Any additional keyword arguments to be used by the
            function. May include ``callback``, which will be run when the
            function has completed execution.

    Returns:
        A Future, which can be cancelled by calling f.cancel().

    Examples:
        >>> f = self.submit_to_executor(self.run_request, callback=self.callback)
        >>>
        >>> def callback(self, kwargs):
    """
    callback = kwargs.pop("callback", None)

    # get stuff we'll need to fake scheduler call
    sched_data = {
        "id": uuid.uuid4().hex,
        "name": self.name,
        "objectid": self.AD.app_management.objects[self.name]["id"],
        "type": "scheduler",
        "function": callback,
        "pin_app": self.get_app_pin(),
        "pin_thread": self.get_pin_thread(),
    }

    def callback_inner(f):
        try:
            # TODO: use our own callback type instead of borrowing
            # from scheduler
            rargs = {}
            rargs["result"] = f.result()
            sched_data["kwargs"] = rargs
            # Dispatch the user callback through the normal worker machinery.
            self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))
            # callback(f.result(), kwargs)
        except Exception as e:
            self.error(e, level="ERROR")

    f = self.AD.executor.submit(func, *args, **kwargs)

    # NOTE(review): indentation reconstructed — the done-callback appears to be
    # attached only when a user callback was supplied; confirm against upstream.
    if callback is not None:
        self.logger.debug("Adding add_done_callback for future %s for %s", f, self.name)
        f.add_done_callback(callback_inner)

    self.AD.futures.add_future(self.name, f)
    return f
@utils.sync_wrapper
async def create_task(self, coro, callback=None, **kwargs):
    """Schedule a coroutine to be executed as a task.

    Args:
        coro: The coroutine object (`not coroutine function`) to be executed.
        callback: The non-async callback to be executed when complete.
        **kwargs (optional): Any additional keyword arguments to send the callback.

    Returns:
        A Future, which can be cancelled by calling f.cancel().

    Examples:
        >>> f = self.create_task(asyncio.sleep(3), callback=self.coro_callback)
        >>>
        >>> def coro_callback(self, kwargs):
    """
    # get stuff we'll need to fake scheduler call
    sched_data = {
        "id": uuid.uuid4().hex,
        "name": self.name,
        "objectid": self.AD.app_management.objects[self.name]["id"],
        "type": "scheduler",
        "function": callback,
        "pin_app": await self.get_app_pin(),
        "pin_thread": await self.get_pin_thread(),
    }

    def callback_inner(f):
        try:
            # TODO: use our own callback type instead of borrowing
            # from scheduler
            kwargs["result"] = f.result()
            sched_data["kwargs"] = kwargs
            # Dispatch the user callback through the normal worker machinery.
            self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))
            # callback(f.result(), kwargs)
        except asyncio.CancelledError:
            # Task was cancelled: silently skip the user callback.
            pass

    f = asyncio.ensure_future(coro)

    # NOTE(review): indentation reconstructed — the done-callback appears to be
    # attached only when a user callback was supplied; confirm against upstream.
    if callback is not None:
        self.logger.debug("Adding add_done_callback for future %s for %s", f, self.name)
        f.add_done_callback(callback_inner)

    self.AD.futures.add_future(self.name, f)
    return f
@staticmethod
async def sleep(delay, result=None):
    """Pause execution of the current async app for ``delay`` seconds.

    (Not available in sync apps.)

    Args:
        delay (float): Number of seconds to pause.
        result (optional): Value to return once the delay completes.

    Returns:
        ``result``, or ``None``.

    Raises:
        RuntimeError: If called from a sync (non-async) context.

    Examples:
        >>> async def myfunction(self):
        >>>     await self.sleep(5)
    """
    try:
        # get_running_loop() raises when no event loop is running, which is
        # exactly the "called from sync code" condition to detect. (The
        # previous get_event_loop() probe could create a brand-new loop in
        # the main thread and wrongly report an async context.)
        asyncio.get_running_loop()
    except RuntimeError:
        raise RuntimeError("The sleep method is for use in ASYNC methods only") from None
    return await asyncio.sleep(delay, result=result)
#
# Other
#
def run_in_thread(self, callback, thread, **kwargs):
    """Run ``callback`` on a specific worker thread rather than the current one.

    Args:
        callback: Function to run on the chosen thread.
        thread (int): Worker thread number (0 - number of threads).
        **kwargs: Arbitrary keyword parameters passed through to the callback.

    Returns:
        None.

    Examples:
        >>> self.run_in_thread(my_callback, 8)
    """
    # Schedule immediately (delay 0) with pinning forced onto the requested thread.
    self.run_in(callback, 0, pin=False, pin_thread=thread, **kwargs)
@utils.sync_wrapper
async def get_thread_info(self):
    """Return a dictionary of information about AppDaemon worker threads.

    Examples:
        >>> thread_info = self.get_thread_info()
    """
    return await self.AD.threading.get_thread_info()
@utils.sync_wrapper
async def get_scheduler_entries(self):
    """Return a dictionary of all entries in the AppDaemon scheduler.

    Examples:
        >>> schedule = self.get_scheduler_entries()
    """
    return await self.AD.sched.get_scheduler_entries()
@utils.sync_wrapper
async def get_callback_entries(self):
    """Return a dictionary of all entries in the AppDaemon state and event callback tables.

    Examples:
        >>> callbacks = self.get_callback_entries()
    """
    return await self.AD.callbacks.get_callback_entries()
@utils.sync_wrapper
async def depends_on_module(self, *modules):
    """Registers a global_modules dependency for an app.

    Args:
        *modules: Modules to register a dependency on.

    Returns:
        None.

    Examples:
        >>> import somemodule
        >>> import anothermodule
        >>> # later
        >>> self.depends_on_module(somemodule, anothermodule)
    """
    # Registration is per-app: the app's own name is passed along with the
    # modules it depends on.
    return await self.AD.app_management.register_module_dependency(self.name, *modules)
<|code_end|>
appdaemon/services.py
<|code_start|>import threading
import traceback
import asyncio
from copy import deepcopy
from typing import Any, Optional, Callable
from appdaemon.appdaemon import AppDaemon
from appdaemon.exceptions import NamespaceException
import appdaemon.utils as utils
class Services:
    """Central registry for callable services.

    Services are stored in a three-level mapping keyed as
    namespace -> domain -> service, where each leaf holds the registered
    callback plus any registration kwargs. A re-entrant lock guards the
    mapping because registration and lookup can happen from worker threads
    while calls are dispatched on the event loop.
    """

    def __init__(self, ad: AppDaemon):
        self.AD = ad
        # namespace -> domain -> service -> {"callback": ..., **kwargs}
        self.services = {}
        # RLock: register/deregister paths may re-enter on the same thread
        self.services_lock = threading.RLock()
        # app name -> {"namespace:domain:service", ...}, used for cleanup
        self.app_registered_services = {}
        self.logger = ad.logging.get_child("_services")

    def register_service(
        self, namespace: str, domain: str, service: str, callback: Callable, **kwargs: Optional[dict]
    ) -> None:
        """Register *callback* as namespace/domain/service.

        Fires a ``service_registered`` event unless ``__silent`` is passed in
        kwargs. When an app name is supplied via ``__name``, the registration
        is also tracked per-app so it can be deregistered or cleared later.
        Refuses to overwrite a service owned by a different app.
        """
        self.logger.debug(
            "register_service called: %s.%s.%s -> %s", namespace, domain, service, callback,
        )
        # Strip the internal flag before kwargs are stored with the service.
        __silent = kwargs.pop("__silent", False)
        with self.services_lock:
            name = kwargs.get("__name")
            # first we confirm if the namespace exists
            if name and namespace not in self.AD.state.state:
                raise NamespaceException(f"Namespace '{namespace}', doesn't exist")
            if namespace not in self.services:
                self.services[namespace] = {}
            if domain not in self.services[namespace]:
                self.services[namespace][domain] = {}
            if service in self.services[namespace][domain]:
                # there was a service already registered before
                # so if a different app, we ask to deregister first
                service_app = self.services[namespace][domain][service].get("__name")
                if service_app and service_app != name:
                    self.logger.warning(
                        f"This service '{domain}/{service}' already registered to a different app '{service_app}'. Do deregister from app first"
                    )
                    return
            # Store the callback together with the remaining registration kwargs
            # (including __name, which marks ownership).
            self.services[namespace][domain][service] = {"callback": callback, **kwargs}
            if __silent is False:
                data = {
                    "event_type": "service_registered",
                    "data": {"namespace": namespace, "domain": domain, "service": service},
                }
                self.AD.loop.create_task(self.AD.events.process_event(namespace, data))
            if name:
                # Track per-app registrations for later cleanup.
                if name not in self.app_registered_services:
                    self.app_registered_services[name] = set()
                self.app_registered_services[name].add(f"{namespace}:{domain}:{service}")

    def deregister_service(self, namespace: str, domain: str, service: str, **kwargs: dict) -> bool:
        """Used to unregister a service.

        Only the app that registered a service (passed via ``__name``) may
        deregister it. Fires a ``service_deregistered`` event and prunes
        empty domain/namespace levels from the registry.

        Raises:
            ValueError: If no app name is given, or the app has not
                registered the named service.
        """
        self.logger.debug(
            "deregister_service called: %s:%s:%s %s", namespace, domain, service, kwargs,
        )
        name = kwargs.get("__name")
        if not name:
            raise ValueError("App must be given to deregister service call")
        if name not in self.app_registered_services:
            raise ValueError(f"The given App {name} has no services registered")
        app_service = f"{namespace}:{domain}:{service}"
        if app_service not in self.app_registered_services[name]:
            raise ValueError(f"The given App {name} doesn't have the given service registered it")
        # if it gets here, then time to deregister
        with self.services_lock:
            # it belongs to the app
            del self.services[namespace][domain][service]
            data = {
                "event_type": "service_deregistered",
                "data": {"namespace": namespace, "domain": domain, "service": service, "app": name},
            }
            self.AD.loop.create_task(self.AD.events.process_event(namespace, data))
            # now check if that domain is empty
            # if it is, remove it also
            if self.services[namespace][domain] == {}:
                # its empty
                del self.services[namespace][domain]
            # now check if that namespace is empty
            # if it is, remove it also
            if self.services[namespace] == {}:
                # its empty
                del self.services[namespace]
            # Drop the per-app tracking entry, and the app's bucket if it is
            # now empty.
            self.app_registered_services[name].remove(app_service)
            if not self.app_registered_services[name]:
                del self.app_registered_services[name]
            return True

    def clear_services(self, name: str) -> None:
        """Deregister every service registered by the app *name*.

        Called when an app is terminated; iterates over a copy because
        deregister_service mutates the tracking set.
        """
        if name not in self.app_registered_services:
            return
        app_services = deepcopy(self.app_registered_services[name])
        for app_service in app_services:
            namespace, domain, service = app_service.split(":")
            self.deregister_service(namespace, domain, service, __name=name)

    def list_services(self, ns: str = "global") -> list:
        """Return registered services as a list of dicts.

        Args:
            ns: Restrict to one namespace, or ``"global"`` for all.
        """
        result = []
        with self.services_lock:
            for namespace in self.services:
                if ns != "global" and namespace != ns:
                    continue
                for domain in self.services[namespace]:
                    for service in self.services[namespace][domain]:
                        result.append({"namespace": namespace, "domain": domain, "service": service})
        return result

    async def call_service(self, namespace: str, domain: str, service: str, data: dict) -> Any:
        """Look up and invoke a registered service callback.

        Returns the callback's result, or ``None`` when the service is
        unknown or the callback raises (the error is logged, not propagated).
        """
        self.logger.debug(
            "call_service: namespace=%s domain=%s service=%s data=%s", namespace, domain, service, data,
        )
        with self.services_lock:
            if namespace not in self.services:
                name = data.get("__name", None)
                self.logger.warning("Unknown namespace (%s) in call_service from %s", namespace, name)
                return None
            if domain not in self.services[namespace]:
                name = data.get("__name", None)
                self.logger.warning(
                    "Unknown domain (%s/%s) in call_service from %s", namespace, domain, name,
                )
                return None
            if service not in self.services[namespace][domain]:
                name = data.get("__name", None)
                self.logger.warning(
                    "Unknown service (%s/%s/%s) in call_service from %s", namespace, domain, service, name,
                )
                return None
            # If we have namespace in data it's an override for the domain of the eventual service call, as distinct
            # from the namespace the call itself is executed from. e.g. set_state() is in the AppDaemon namespace but
            # needs to operate on a different namespace, e.g. "default"
            if "__name" in data:
                del data["__name"]
            if "namespace" in data:
                ns = data["namespace"]
                del data["namespace"]
            else:
                ns = namespace
            try:
                funcref = self.services[namespace][domain][service]["callback"]
                # Decide whether or not to call this as async
                # Default to true
                isasync = True
                if "__async" in self.services[namespace][domain][service]:
                    # We have a kwarg to tell us what to do
                    if self.services[namespace][domain][service]["__async"] == "auto":
                        # We decide based on introspection
                        if not asyncio.iscoroutinefunction(funcref):
                            isasync = False
                    else:
                        # We do what the kwarg tells us
                        isasync = self.services[namespace][domain][service]["__async"]
                if isasync is True:
                    # it's a coroutine just await it.
                    return await funcref(ns, domain, service, data)
                else:
                    # It's not a coroutine, run it in an executor
                    return await utils.run_in_executor(self, funcref, ns, domain, service, data)
            except Exception:
                self.logger.error("-" * 60)
                self.logger.error("Unexpected error in call_service()")
                self.logger.error("-" * 60)
                self.logger.error(traceback.format_exc())
                self.logger.error("-" * 60)
                return None
<|code_end|>
| appdaemon/adapi.py
<|code_start|>import asyncio
import datetime
import inspect
import iso8601
import re
from datetime import timedelta
from copy import deepcopy
from typing import Any, Optional, Callable
# needed for fake coro cb that looks like scheduler
import uuid
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class ADAPI:
"""AppDaemon API class.
This class includes all native API calls to AppDaemon
"""
#
# Internal parameters
#
def __init__(self, ad: AppDaemon, name, logging_obj, args, config, app_config, global_vars):
    """Initialize the API object for a single app.

    Args:
        ad: The owning AppDaemon instance.
        name (str): Name of the app this API object serves.
        logging_obj: AppDaemon logging subsystem used to build app loggers.
        args (dict): The app's own configuration section (deep-copied below).
        config: Global AppDaemon configuration.
        app_config: Full apps configuration.
        global_vars: Object shared between all apps.
    """
    # Store args
    self.AD = ad
    self.name = name
    self._logging = logging_obj
    self.config = config
    self.app_config = app_config
    # Deep copy so the app can mutate its args without touching the master copy.
    self.args = deepcopy(args)
    self.app_dir = self.AD.app_dir
    self.config_dir = self.AD.config_dir
    self.dashboard_dir = None
    # The HTTP component is optional; only when present is there a dashboard dir.
    if self.AD.http is not None:
        self.dashboard_dir = self.AD.http.dashboard_dir
    self.global_vars = global_vars
    self._namespace = "default"
    self.logger = self._logging.get_child(name)
    self.err = self._logging.get_error().getChild(name)
    # Cache of user-defined loggers, populated lazily by get_user_log().
    self.user_logs = {}
    if "log_level" in args:
        self.logger.setLevel(args["log_level"])
        self.err.setLevel(args["log_level"])
    if "log" in args:
        # Redirect the app's main log to a user-defined log if requested.
        userlog = self.get_user_log(args["log"])
        if userlog is not None:
            self.logger = userlog
    # Dialogflow API version detected from the last request (1 or 2).
    self.dialogflow_v = 2
@staticmethod
def _sub_stack(msg):
# If msg is a data structure of some type, don't sub
if type(msg) is str:
stack = inspect.stack()
if msg.find("__module__") != -1:
msg = msg.replace("__module__", stack[2][1])
if msg.find("__line__") != -1:
msg = msg.replace("__line__", str(stack[2][2]))
if msg.find("__function__") != -1:
msg = msg.replace("__function__", stack[2][3])
return msg
def _get_namespace(self, **kwargs):
if "namespace" in kwargs:
namespace = kwargs["namespace"]
del kwargs["namespace"]
else:
namespace = self._namespace
return namespace
#
# Logging
#
def _log(self, logger, msg, *args, **kwargs):
#
# Internal
#
if "level" in kwargs:
level = kwargs.pop("level", "INFO")
else:
level = "INFO"
ascii_encode = kwargs.pop("ascii_encode", True)
if ascii_encode is True:
msg = str(msg).encode("utf-8", "replace").decode("ascii", "replace")
logger.log(self._logging.log_levels[level], msg, *args, **kwargs)
def log(self, msg, *args, **kwargs):
    """Logs a message to AppDaemon's main logfile.

    Args:
        msg (str): The message to log.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        level (str, optional): The log level of the message - takes a string representing the
            standard logger levels (Default: ``"INFO"``).
        ascii_encode (bool, optional): Switch to disable the encoding of all log messages to
            ascii. Set this to true if you want to log UTF-8 characters (Default: ``True``).
        log (str, optional): Send the message to a specific log, either system or user_defined.
            System logs are ``main_log``, ``error_log``, ``diag_log`` or ``access_log``.
            Any other value in use here must have a corresponding user-defined entity in
            the ``logs`` section of appdaemon.yaml.
        stack_info (bool, optional): If ``True`` the stack info will included.

    Returns:
        None.

    Examples:
        Log a message to the main logfile of the system.

        >>> self.log("Log Test: Parameter is %s", some_variable)

        Log a message to the specified logfile.

        >>> self.log("Log Test: Parameter is %s", some_variable, log="test_log")

        Log a message with error-level to the main logfile of the system.

        >>> self.log("Log Test: Parameter is %s", some_variable, level = "ERROR")

        Log a message using `placeholders` to the main logfile of the system.

        >>> self.log("Line: __line__, module: __module__, function: __function__, Msg: Something bad happened")

        Log a WARNING message (including the stack info) to the main logfile of the system.

        >>> self.log("Stack is", some_value, level="WARNING", stack_info=True)
    """
    if "log" in kwargs:
        # A user-defined log was requested; route the message there.
        logger = self.get_user_log(kwargs["log"])
        kwargs.pop("log")
    else:
        logger = self.logger
    try:
        # Expand __module__/__line__/__function__ placeholders from the
        # caller's stack frame.
        msg = self._sub_stack(msg)
    except IndexError as i:
        # Stack was shallower than expected: report the problem on the error
        # log, then fall through and log the original message anyway.
        rargs = deepcopy(kwargs)
        rargs["level"] = "ERROR"
        self._log(self.err, i, *args, **rargs)
    self._log(logger, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
    """Logs a message to AppDaemon's error logfile.

    Args:
        msg (str): The message to log.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        level (str, optional): The log level of the message - takes a string representing the
            standard logger levels.
        ascii_encode (bool, optional): Switch to disable the encoding of all log messages to
            ascii. Set this to true if you want to log UTF-8 characters (Default: ``True``).
        log (str, optional): Send the message to a specific log, either system or user_defined.
            System logs are ``main_log``, ``error_log``, ``diag_log`` or ``access_log``.
            Any other value in use here must have a corresponding user-defined entity in
            the ``logs`` section of appdaemon.yaml.

    Returns:
        None.

    Examples:
        Log an error message to the error logfile of the system.

        >>> self.error("Some Warning string")

        Log an error message with critical-level to the error logfile of the system.

        >>> self.error("Some Critical string", level = "CRITICAL")
    """
    # Same machinery as log(), but always directed at the error logger.
    self._log(self.err, msg, *args, **kwargs)
@utils.sync_wrapper
async def listen_log(self, callback, level="INFO", **kwargs):
    """Registers the App to receive a callback every time an App logs a message.

    Args:
        callback (function): Function to be called when a message is logged.
        level (str): Logging level to be used - lower levels will not be forwarded
            to the app (Default: ``"INFO"``).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        log (str, optional): Name of the log to listen to, default is all logs. The name
            should be one of the 4 built in types ``main_log``, ``error_log``, ``diag_log``
            or ``access_log`` or a user defined log entry.
        pin (bool, optional): If True, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool the callback
            will be run by (0 - number of threads -1).
        namespace (str, optional): Namespace for the callback registration
            (Default: ``"admin"``).

    Returns:
        A unique identifier that can be used to cancel the callback if required.
        Since variables created within object methods are local to the function they are
        created in, and in all likelihood, the cancellation will be invoked later in a
        different function, it is recommended that handles are stored in the object
        namespace, e.g., self.handle.

    Examples:
        Listen to all ``WARNING`` log messages of the system.

        >>> self.handle = self.listen_log(self.cb, "WARNING")

        Sample callback:

        >>> def log_message(self, name, ts, level, type, message, kwargs):

        Listen to all ``WARNING`` log messages of the `main_log`.

        >>> self.handle = self.listen_log(self.cb, "WARNING", log="main_log")

        Listen to all ``WARNING`` log messages of a user-defined logfile.

        >>> self.handle = self.listen_log(self.cb, "WARNING", log="my_custom_log")
    """
    # Log callbacks live in the "admin" namespace unless overridden.
    namespace = kwargs.pop("namespace", "admin")
    return await self.AD.logging.add_log_callback(namespace, self.name, callback, level, **kwargs)
@utils.sync_wrapper
async def cancel_listen_log(self, handle):
    """Cancels the log callback for the App.

    Args:
        handle: The handle returned when the `listen_log` call was made.

    Returns:
        Boolean.

    Examples:
        >>> self.cancel_listen_log(handle)
    """
    self.logger.debug("Canceling listen_log for %s", self.name)
    return await self.AD.logging.cancel_log_callback(self.name, handle)
def get_main_log(self):
    """Returns the underlying logger object used for the main log.

    Examples:
        Log a critical message to the `main` logfile of the system.

        >>> log = self.get_main_log()
        >>> log.critical("Log a critical error")
    """
    # Note: this may be a user-defined logger if the app config set "log".
    return self.logger
def get_error_log(self):
    """Returns the underlying logger object used for the error log.

    Examples:
        Log an error message to the `error` logfile of the system.

        >>> error_log = self.get_error_log()
        >>> error_log.error("Log an error", stack_info=True, exc_info=True)
    """
    return self.err
def get_user_log(self, log):
    """Gets the specified user-defined logger of the App.

    Args:
        log (str): The name of the log you want to get the underlying logger object from,
            as described in the ``logs`` section of ``appdaemon.yaml``.

    Returns:
        The underlying logger object for the named user log, or ``None`` if
        no such log is defined.

    Examples:
        Log an error message to a user-defined logfile.

        >>> log = self.get_user_log("test_log")
        >>> log.error("Log an error", stack_info=True, exc_info=True)
    """
    logger = None
    if log in self.user_logs:
        # Already built once — return the cached child logger.
        logger = self.user_logs[log]
    else:
        # Build it on the fly and cache it for subsequent calls.
        parent = self.AD.logging.get_user_log(self, log)
        if parent is not None:
            logger = parent.getChild(self.name)
            self.user_logs[log] = logger
            if "log_level" in self.args:
                logger.setLevel(self.args["log_level"])
    return logger
def set_log_level(self, level):
"""Sets a specific log level for the App.
Args:
level (str): Log level.
Returns:
None.
Notes:
Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
``DEBUG``, ``NOTSET``.
Examples:
>>> self.set_log_level("DEBUG")
"""
self.logger.setLevel(self._logging.log_levels[level])
self.err.setLevel(self._logging.log_levels[level])
for log in self.user_logs:
self.user_logs[log].setLevel(self._logging.log_levels[level])
def set_error_level(self, level):
    """Sets the log level to send to the `error` logfile of the system.

    Args:
        level (str): Error level.

    Returns:
        None.

    Notes:
        Supported log levels: ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``,
        ``DEBUG``, ``NOTSET``.
    """
    # Only affects the error logger; use set_log_level() for all loggers.
    self.err.setLevel(self._logging.log_levels[level])
#
# Threading
#
@utils.sync_wrapper
async def set_app_pin(self, pin):
    """Sets an App to be pinned or unpinned.

    Args:
        pin (bool): Sets whether the App becomes pinned or not.

    Returns:
        None.

    Examples:
        The following line should be put inside the `initialize()` function.

        >>> self.set_app_pin(True)
    """
    await self.AD.threading.set_app_pin(self.name, pin)
@utils.sync_wrapper
async def get_app_pin(self):
    """Finds out if the current App is currently pinned or not.

    Returns:
        bool: ``True`` if the App is pinned, ``False`` otherwise.

    Examples:
        >>> if self.get_app_pin():
        >>>     self.log("App pinned!")
    """
    return await self.AD.threading.get_app_pin(self.name)
@utils.sync_wrapper
async def set_pin_thread(self, thread):
    """Sets the thread that the App will be pinned to.

    Args:
        thread (int): Number of the thread to pin to. Threads start at 0 and go up to the number
            of threads specified in ``appdaemon.yaml`` -1.

    Returns:
        None.

    Examples:
        The following line should be put inside the `initialize()` function.

        >>> self.set_pin_thread(5)
    """
    return await self.AD.threading.set_pin_thread(self.name, thread)
@utils.sync_wrapper
async def get_pin_thread(self):
    """Finds out which thread the App is pinned to.

    Returns:
        int: The thread number or -1 if the App is not pinned.

    Examples:
        >>> thread = self.get_pin_thread()
        >>> self.log(f"I'm pinned to thread: {thread}")
    """
    return await self.AD.threading.get_pin_thread(self.name)
#
# Namespace
#
def set_namespace(self, namespace):
    """Sets a new namespace for the App to use from that point forward.

    Args:
        namespace (str): Name of the new namespace

    Returns:
        None.

    Examples:
        >>> self.set_namespace("hass1")
    """
    # All subsequent API calls without an explicit namespace= kwarg use this.
    self._namespace = namespace
def get_namespace(self):
    """Returns the App's current namespace."""
    return self._namespace
@utils.sync_wrapper
async def namespace_exists(self, namespace):
    """Checks the existence of a namespace in AppDaemon.

    Args:
        namespace (str): The namespace to be checked if it exists.

    Returns:
        bool: ``True`` if the namespace exists, ``False`` otherwise.

    Examples:
        Check if the namespace ``storage`` exists within AD

        >>> if self.namespace_exists("storage"):
        >>>     #do something like create it
    """
    return await self.AD.state.namespace_exists(namespace)
@utils.sync_wrapper
async def add_namespace(self, namespace, **kwargs):
    """Used to add a user-defined namespace from apps, which has a database file associated with it.

    This way, when AD restarts these entities will be reloaded into AD with their
    previous states within the namespace. This can be used as a basic form of
    non-volatile storage of entity data. Depending on the configuration of the
    namespace, this function can be setup to constantly be running automatically
    or only when AD shuts down. This function also allows for users to manually
    execute the command as and when needed.

    Args:
        namespace (str): The namespace to be newly created, which must not be same as the operating namespace
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        writeback (str, optional): The writeback to be used. Will be ``safe`` by default
        persist (bool, optional): Whether to make the namespace persistent. If persistent,
            when AD reboots all the created entities will be reloaded intact.
            It is persistent by default

    Returns:
        The file path to the newly created namespace. Will be ``None`` if not persistent

    Examples:
        Add a new namespace called `storage`.

        >>> self.add_namespace("storage")
    """
    if namespace == self.get_namespace():  # if it belongs to this app's namespace
        raise ValueError("Cannot add namespace with the same name as operating namespace")
    writeback = kwargs.get("writeback", "safe")
    persist = kwargs.get("persist", True)
    return await self.AD.state.add_namespace(namespace, writeback, persist, self.name)
@utils.sync_wrapper
async def remove_namespace(self, namespace):
    """Used to remove a previously user-defined namespace from apps, which has a database file associated with it.

    Args:
        namespace (str): The namespace to be removed, which must not be same as the operating namespace

    Returns:
        The data within that namespace

    Examples:
        Removes the namespace called `storage`.

        >>> self.remove_namespace("storage")
    """
    if namespace == self.get_namespace():  # if it belongs to this app's namespace
        raise ValueError("Cannot remove namespace with the same name as operating namespace")
    return await self.AD.state.remove_namespace(namespace)
@utils.sync_wrapper
async def list_namespaces(self):
    """Returns a list of available namespaces.

    Examples:
        >>> self.list_namespaces()
    """
    return await self.AD.state.list_namespaces()
@utils.sync_wrapper
async def save_namespace(self, **kwargs):
    """Saves entities created in user-defined namespaces into a file.

    This way, when AD restarts these entities will be reloaded into AD with their
    previous states within the namespace. This can be used as a basic form of
    non-volatile storage of entity data. Depending on the configuration of the
    namespace, this function can be setup to constantly be running automatically
    or only when AD shuts down. This function also allows for users to manually
    execute the command as and when needed.

    Args:
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        Save all entities of the default namespace.

        >>> self.save_namespace()
    """
    namespace = self._get_namespace(**kwargs)
    await self.AD.state.save_namespace(namespace)
#
# Utility
#
@utils.sync_wrapper
async def get_app(self, name):
    """Gets the instantiated object of another app running within the system.

    This is useful for calling functions or accessing variables that reside
    in different apps without requiring duplication of code.

    Args:
        name (str): Name of the app required. This is the name specified in
            header section of the config file, not the module or class.

    Returns:
        An object reference to the class.

    Examples:
        >>> MyApp = self.get_app("MotionLights")
        >>> MyApp.turn_light_on()
    """
    return await self.AD.app_management.get_app(name)
@utils.sync_wrapper
async def _check_entity(self, namespace, entity):
if "." not in entity:
raise ValueError("{}: Invalid entity ID: {}".format(self.name, entity))
if not await self.AD.state.entity_exists(namespace, entity):
self.logger.warning("%s: Entity %s not found in namespace %s", self.name, entity, namespace)
@staticmethod
def get_ad_version():
    """Returns a string with the current version of AppDaemon.

    Examples:
        >>> version = self.get_ad_version()
    """
    return utils.__version__
#
# Entity
#
@utils.sync_wrapper
async def add_entity(self, entity_id, state=None, attributes=None, **kwargs):
    """Adds a non-existent entity, by creating it within a namespace.

    If an entity doesn't exist and needs to be created, this function can be used to create it locally.
    Please note this only creates the entity locally.

    Args:
        entity_id (str): The fully qualified entity id (including the device type).
        state (str): The state the entity is to have
        attributes (dict): The attributes the entity is to have
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        Add the entity in the present namespace.

        >>> self.add_entity('sensor.living_room')

        adds the entity in the `mqtt` namespace.

        >>> self.add_entity('mqtt.living_room_temperature', namespace='mqtt')
    """
    namespace = self._get_namespace(**kwargs)
    if await self.AD.state.entity_exists(namespace, entity_id):
        # Creating an existing entity is a no-op; warn instead of clobbering.
        self.logger.warning("%s already exists, will not be adding it", entity_id)
        return None
    await self.AD.state.add_entity(namespace, entity_id, state, attributes)
    return None
@utils.sync_wrapper
async def entity_exists(self, entity_id, **kwargs):
    """Checks the existence of an entity in Home Assistant.

    When working with multiple Home Assistant instances, it is possible to specify the
    namespace, so that it checks within the right instance in the event the app is
    working in a different instance. Also when using this function, it is also possible
    to check if an AppDaemon entity exists.

    Args:
        entity_id (str): The fully qualified entity id (including the device type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        bool: ``True`` if the entity id exists, ``False`` otherwise.

    Examples:
        Check if the entity light.living_room exist within the app's namespace

        >>> if self.entity_exists("light.living_room"):
        >>>     #do something

        Check if the entity mqtt.security_settings exist within the `mqtt` namespace
        if the app is operating in a different namespace like default

        >>> if self.entity_exists("mqtt.security_settings", namespace = "mqtt"):
        >>>     #do something
    """
    namespace = self._get_namespace(**kwargs)
    return await self.AD.state.entity_exists(namespace, entity_id)
@utils.sync_wrapper
async def split_entity(self, entity_id, **kwargs):
    """Splits an entity into parts.

    This utility function will take a fully qualified entity id of the form ``light.hall_light``
    and split it into 2 values, the device and the entity, e.g. light and hall_light.

    Args:
        entity_id (str): The fully qualified entity id (including the device type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        A list with 2 entries, the device and entity respectively.

    Examples:
        Do some action if the device of the entity is `scene`.

        >>> device, entity = self.split_entity(entity_id)
        >>> if device == "scene":
        >>>     #do something specific to scenes
    """
    # Validates the id (raises on malformed ids, warns on unknown entities).
    await self._check_entity(self._get_namespace(**kwargs), entity_id)
    return entity_id.split(".")
@utils.sync_wrapper
async def remove_entity(self, entity_id, **kwargs):
    """Deletes an entity created within a namespace.

    If an entity was created, and it's deemed no longer needed, by using this function,
    the entity can be removed from AppDaemon permanently.

    Args:
        entity_id (str): The fully qualified entity id (including the device type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        None.

    Examples:
        Delete the entity in the present namespace.

        >>> self.remove_entity('sensor.living_room')

        Delete the entity in the `mqtt` namespace.

        >>> self.remove_entity('mqtt.living_room_temperature', namespace = 'mqtt')
    """
    namespace = self._get_namespace(**kwargs)
    await self.AD.state.remove_entity(namespace, entity_id)
    return None
@staticmethod
def split_device_list(devices):
"""Converts a comma-separated list of device types to an iterable list.
This is intended to assist in use cases where the App takes a list of
entities from an argument, e.g., a list of sensors to monitor. If only
one entry is provided, an iterable list will still be returned to avoid
the need for special processing.
Args:
devices (str): A comma-separated list of devices to be split (without spaces).
Returns:
A list of split devices with 1 or more entries.
Examples:
>>> for sensor in self.split_device_list(self.args["sensors"]):
>>> #do something for each sensor, e.g., make a state subscription
"""
return devices.split(",")
@utils.sync_wrapper
async def get_plugin_config(self, **kwargs):
    """Gets any useful metadata that the plugin may have available.

    For instance, for the HASS plugin, this will return Home Assistant configuration
    data such as latitude and longitude.

    Args:
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str): Select the namespace of the plugin for which data is desired.

    Returns:
        A dictionary containing all the configuration information available
        from the Home Assistant ``/api/config`` endpoint.

    Examples:
        >>> config = self.get_plugin_config()
        >>> self.log(f'My current position is {config["latitude"]}(Lat), {config["longitude"]}(Long)')
        My current position is 50.8333(Lat), 4.3333(Long)
    """
    namespace = self._get_namespace(**kwargs)
    return await self.AD.plugins.get_plugin_meta(namespace)
@utils.sync_wrapper
async def friendly_name(self, entity_id, **kwargs):
    """Gets the Friendly Name of an entity.

    Args:
        entity_id (str): The fully qualified entity id (including the device type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the section on
            `namespaces <APPGUIDE.html#namespaces>`__ for a detailed description.
            In most cases it is safe to ignore this parameter.

    Returns:
        str: The friendly name of the entity if it exists or the entity id if not.

    Examples:
        >>> tracker = "device_tracker.andrew"
        >>> friendly_name = self.friendly_name(tracker)
        >>> tracker_state = self.get_tracker_state(tracker)
        >>> self.log(f"{tracker} ({friendly_name}) is {tracker_state}.")
        device_tracker.andrew (Andrew Tracker) is on.
    """
    await self._check_entity(self._get_namespace(**kwargs), entity_id)
    # NOTE(review): this fetches the full state dict rather than just this
    # entity — presumably get_state() supports an entity filter; confirm
    # before changing.
    state = await self.get_state(**kwargs)
    if entity_id in state:
        if "friendly_name" in state[entity_id]["attributes"]:
            return state[entity_id]["attributes"]["friendly_name"]
        else:
            return entity_id
    # Entity vanished between the check and the lookup.
    return None
@utils.sync_wrapper
async def set_production_mode(self, mode=True):
    """Deactivates or activates the production mode in AppDaemon.

    When called without passing any arguments, mode defaults to ``True``.

    Args:
        mode (bool): If it is ``True`` the production mode is activated, or deactivated
            otherwise.

    Returns:
        The specified mode or ``None`` if a wrong parameter is passed.
    """
    # Reject non-boolean values rather than coercing them.
    if not isinstance(mode, bool):
        self.logger.warning("%s not a valid parameter for Production Mode", mode)
        return None
    await self.AD.utility.set_production_mode(mode)
    return mode
#
# Internal Helper functions
#
def start_app(self, app, **kwargs):
"""Starts an App which can either be running or not.
This Api call cannot start an app which has already been disabled in the App Config.
It essentially only runs the initialize() function in the app, and changes to attributes
like class name or app config is not taken into account.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.start_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "admin"
kwargs["__name"] = self.name
self.call_service("app/start", **kwargs)
return None
def stop_app(self, app, **kwargs):
"""Stops an App which is running.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.stop_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "admin"
kwargs["__name"] = self.name
self.call_service("app/stop", **kwargs)
return None
def restart_app(self, app, **kwargs):
"""Restarts an App which can either be running or not.
Args:
app (str): Name of the app.
**kwargs (optional): Zero or more keyword arguments.
Returns:
None.
Examples:
>>> self.restart_app("lights_app")
"""
kwargs["app"] = app
kwargs["namespace"] = "admin"
kwargs["__name"] = self.name
self.call_service("app/restart", **kwargs)
return None
def reload_apps(self, **kwargs):
    """Reloads apps whose .yaml or .py files have changed.

    Useful when AppDaemon runs in production mode and changed apps need to
    be reloaded on demand.

    Args:
        **kwargs (optional): Zero or more keyword arguments.

    Returns:
        None.

    Examples:
        >>> self.reload_apps()
    """
    kwargs.update({"namespace": "admin", "__name": self.name})
    self.call_service("app/reload", **kwargs)
    return None
#
# Dialogflow
#
def get_dialogflow_intent(self, data):
    """Extracts the requested intent's action from a Google Home request.

    Also records which Dialogflow API version produced the payload in
    ``self.dialogflow_v`` (1 or 2), so a later
    ``format_dialogflow_response()`` call can reply in the matching format.

    Args:
        data: Request payload received from Google Home.

    Returns:
        The action string from the interaction model, or ``None`` when the
        payload contains no action.

    Examples:
        >>> intent = ADAPI.get_dialogflow_intent(data)
    """
    # V1 payloads nest under "result", V2 under "queryResult"
    for version, key in ((1, "result"), (2, "queryResult")):
        if key in data and "action" in data[key]:
            self.dialogflow_v = version
            return data[key]["action"]
    return None
@staticmethod
def get_dialogflow_slot_value(data, slot=None):
"""Gets slots' values from the interaction model.
Args:
data: Response received from Google Home.
slot (str): Name of the slot. If a name is not specified, all slots will be returned
as a dictionary. If a name is specified but is not found, ``None`` will be returned.
Returns:
A string representing the value of the slot from the interaction model, or a hash of slots.
Examples:
>>> beer_type = ADAPI.get_dialogflow_intent(data, "beer_type")
>>> all_slots = ADAPI.get_dialogflow_intent(data)
"""
if "result" in data:
# using V1 API
contexts = data["result"]["contexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["result"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
elif "queryResult" in data:
# using V2 API
contexts = data["queryResult"]["outputContexts"][0]
if contexts:
parameters = contexts.get("parameters")
else:
parameters = data["queryResult"]["parameters"]
if slot is None:
return parameters
elif slot in parameters:
return parameters[slot]
else:
return None
else:
return None
def format_dialogflow_response(self, speech=None):
    """Builds a Dialogflow reply payload containing the given speech text.

    The payload shape follows whichever API version was detected by the
    most recent ``get_dialogflow_intent()`` call (``self.dialogflow_v``).

    Args:
        speech (str): The text for Google Home to say.

    Returns:
        A dict suitable for returning to Dialogflow, or ``None`` when the
        detected API version is unknown.

    Examples:
        >>> ADAPI.format_dialogflow_response(speech = "Hello World")
    """
    version = self.dialogflow_v
    if version == 1:
        return {"speech": speech, "source": "Appdaemon", "displayText": speech}
    if version == 2:
        return {"fulfillmentText": speech, "source": "Appdaemon"}
    return None
#
# Alexa
#
@staticmethod
def format_alexa_response(speech=None, card=None, title=None):
"""Formats a response to be returned to Alex including speech and a card.
Args:
speech (str): The text for Alexa to say.
card (str): Text for the card.
title (str): Title for the card.
Returns:
None.
Examples:
>>> ADAPI.format_alexa_response(speech = "Hello World", card = "Greetings to the world", title = "Hello")
"""
response = {"shouldEndSession": True}
if speech is not None:
response["outputSpeech"] = {"type": "PlainText", "text": speech}
if card is not None:
response["card"] = {"type": "Simple", "title": title, "content": card}
speech = {"version": "1.0", "response": response, "sessionAttributes": {}}
return speech
@staticmethod
def get_alexa_error(data):
"""Gets the error message from the Alexa API response.
Args:
data: Response received from the Alexa API .
Returns:
A string representing the value of message, or ``None`` if no error message was received.
"""
if "request" in data and "err" in data["request"] and "message" in data["request"]["err"]:
return data["request"]["err"]["message"]
else:
return None
@staticmethod
def get_alexa_intent(data):
"""Gets the Intent's name from the Alexa response.
Args:
data: Response received from Alexa.
Returns:
A string representing the Intent's name from the interaction model that was requested,
or ``None``, if no Intent was received.
Examples:
>>> intent = ADAPI.get_alexa_intent(data)
"""
if "request" in data and "intent" in data["request"] and "name" in data["request"]["intent"]:
return data["request"]["intent"]["name"]
else:
return None
@staticmethod
def get_alexa_slot_value(data, slot=None):
"""Gets values for slots from the interaction model.
Args:
data: The request data received from Alexa.
slot: Name of the slot. If a name is not specified, all slots will be returned as
a dictionary. If a name is specified but is not found, None will be returned.
Returns:
A ``string`` representing the value of the slot from the interaction model, or a ``hash`` of slots.
Examples:
>>> beer_type = ADAPI.get_alexa_intent(data, "beer_type")
>>> all_slots = ADAPI.get_alexa_intent(data)
"""
if "request" in data and "intent" in data["request"] and "slots" in data["request"]["intent"]:
if slot is None:
return data["request"]["intent"]["slots"]
else:
if slot in data["request"]["intent"]["slots"] and "value" in data["request"]["intent"]["slots"][slot]:
return data["request"]["intent"]["slots"][slot]["value"]
else:
return None
else:
return None
#
# API
#
@utils.sync_wrapper
async def register_endpoint(
    self, callback: Callable[[Any, dict], Any], endpoint: str = None, **kwargs: Optional[dict]
) -> str:
    """Registers an endpoint for API calls into the current App.

    Args:
        callback: Function called when a request is made to the named
            endpoint. It should return a string body (may be empty) plus an
            HTTP status such as ``200``.
        endpoint (str, optional): Name of the endpoint (default: the app's
            name). Must be unique across all endpoints; an app may register
            several.

    Keyword Args:
        **kwargs (optional): Zero or more keyword arguments.

    Returns:
        A handle usable with ``deregister_endpoint()``, or ``None`` when
        the HTTP component is not configured.

    Examples:
        >>> self.register_endpoint(self.my_callback)
        >>> self.register_endpoint(self.alexa_cb, "alexa")
        >>> async def alexa_cb(self, request, kwargs):
        >>>     data = await request.json()
        >>>     self.log(data)
        >>>     response = {"message": "Hello World"}
        >>>     return response, 200
    """
    if endpoint is None:
        endpoint = self.name
    http = self.AD.http
    if http is None:
        # Nothing to register against; warn and give the caller nothing back
        self.logger.warning(
            "register_endpoint for %s failed - HTTP component is not configured", endpoint,
        )
        return None
    return await http.register_endpoint(callback, endpoint, self.name, **kwargs)
@utils.sync_wrapper
async def deregister_endpoint(self, handle: str) -> None:
    """Removes an endpoint registration made with ``register_endpoint()``.

    Args:
        handle: A handle returned by a previous call to ``register_endpoint``.

    Returns:
        None.

    Examples:
        >>> self.deregister_endpoint(handle)
    """
    http = self.AD.http
    await http.deregister_endpoint(handle, self.name)
#
# Web Route
#
@utils.sync_wrapper
async def register_route(
    self, callback: Callable[[Any, dict], Any], route: str = None, **kwargs: Optional[dict]
) -> str:
    """Registers a route for Web requests into the current App.

    Registered app web routes are served by AD's internal web server and
    can be reached at ``http://AD_IP:Port/app/route``.

    Args:
        callback: Async function called when a request is made to the named
            route. It should return a string body (may be empty) plus an
            HTTP status such as ``200``.
        route (str, optional): Name of the route (default: the app's name).

    Keyword Args:
        **kwargs (optional): Zero or more keyword arguments.

    Returns:
        A handle usable with ``deregister_route()``, or ``None`` when the
        HTTP component is not configured.

    Examples:
        >>> self.register_route(my_callback)
        >>> self.register_route(stream_cb, "camera")
    """
    if route is None:
        route = self.name
    if self.AD.http is not None:
        return await self.AD.http.register_route(callback, route, self.name, **kwargs)
    else:
        # Fixed typo in log message: "filed" -> "failed"
        self.logger.warning("register_route for %s failed - HTTP component is not configured", route)
@utils.sync_wrapper
async def deregister_route(self, handle: str) -> None:
    """Removes an app route registration made with ``register_route()``.

    Args:
        handle: A handle returned by a previous call to ``register_app_route``.

    Returns:
        None.

    Examples:
        >>> self.deregister_route(handle)
    """
    http = self.AD.http
    await http.deregister_route(handle, self.name)
#
# State
#
@utils.sync_wrapper
async def listen_state(self, callback, entity=None, **kwargs):
    """Registers a callback that reacts to state changes.

    Args:
        callback: Function invoked when the requested state change occurs.
            It must conform to the standard State Callback format documented
            `here <APPGUIDE.html#state-callbacks>`__.
        entity (str, optional): A fully qualified entity id (e.g.
            ``light.office_1``) to watch a single entity, or a bare device
            type (e.g. ``light``) to watch all entities of that type. When
            omitted, all state changes are watched.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        attribute (str, optional): Attribute within the entity state object
            to watch; the special value ``all`` delivers the entire state
            dictionary to the callback instead of a single value.
        new (optional): Only fire when the watched attribute's new value
            matches this.
        old (optional): Only fire when the watched attribute's old value
            matches this.
        duration (int, optional): Seconds the listened-for state must be
            maintained before the callback fires; best combined with
            ``old``/``new`` on a specific entity.
        timeout (int, optional): Seconds after which the callback is
            automatically removed (a pending duration timer still fires).
        immediate (bool, optional): Check the entity's current state at
            registration time and fire (or start the ``duration`` clock)
            right away when the conditions already hold; ``old`` is ignored
            and reported as ``None`` in that case.
        oneshot (bool, optional): If ``True``, the callback is cancelled
            automatically after its first invocation.
        namespace (str, optional): Namespace for the subscription; the
            special value ``global`` listens to updates from any plugin.
            See `namespaces <APPGUIDE.html#namespaces>`__.
        pin (bool, optional): If ``True``, pin the callback to a particular
            thread.
        pin_thread (int, optional): Which worker-pool thread runs the
            callback (0 - number of threads -1).
        *kwargs (optional): Additional keyword arguments supplied to the
            callback when it is called.

    Notes:
        The ``old`` and ``new`` args can be used singly or together.

    Returns:
        A unique handle for ``cancel_listen_state()``; store it on the app
        object (e.g. ``self.handle``) so it is available later.

    Examples:
        >>> self.handle = self.listen_state(self.my_callback)
        >>> self.handle = self.listen_state(self.my_callback, "light")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", attribute = "brightness")
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on", duration = 60)
        >>> self.handle = self.listen_state(self.my_callback, "light.office_1", new = "on", duration = 60, immediate = True)
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    # Only fully qualified entity ids (containing a ".") can be validated
    if entity is not None and "." in entity:
        await self._check_entity(namespace, entity)
    self.logger.debug("Calling listen_state for %s", self.name)
    return await self.AD.state.add_state_callback(self.name, namespace, entity, callback, kwargs)
@utils.sync_wrapper
async def cancel_listen_state(self, handle):
    """Cancels a callback created by ``listen_state()``.

    Only the specific subscription identified by the handle is removed;
    other state callbacks continue to be monitored.

    Args:
        handle: The handle returned when the ``listen_state()`` call was made.

    Returns:
        Boolean.

    Examples:
        >>> self.cancel_listen_state(self.office_light_handle)
    """
    self.logger.debug("Canceling listen_state for %s", self.name)
    cancelled = await self.AD.state.cancel_state_callback(handle, self.name)
    return cancelled
@utils.sync_wrapper
async def info_listen_state(self, handle):
    """Looks up the details of a state callback from its handle.

    Args:
        handle: The handle returned when the ``listen_state()`` call was made.

    Returns:
        The ``entity``, ``attribute``, and ``kwargs`` values the callback
        was originally created with.

    Examples:
        >>> entity, attribute, kwargs = self.info_listen_state(self.handle)
    """
    self.logger.debug("Calling info_listen_state for %s", self.name)
    info = await self.AD.state.info_state_callback(handle, self.name)
    return info
@utils.sync_wrapper
async def get_state(self, entity_id=None, attribute=None, default=None, copy=True, **kwargs):
    """Gets the state of any component within Home Assistant.

    States are tracked continuously (push-based), so this call is served
    locally and does not round-trip to Home Assistant.

    Args:
        entity_id (str, optional): A fully qualified entity id returns that
            entity's state attribute (e.g. ``on``/``off``); a bare device
            type (e.g. ``light``) returns a dictionary of all such devices
            keyed by ``entity_id``; omitted entirely, the full state of the
            system is returned.
        attribute (str, optional): Attribute within the entity state object
            to return; the special value ``all`` returns the entire state
            dictionary for the entity.
        default (any, optional): Value returned when the requested
            attribute or entity does not exist (Default: ``None``).
        copy (bool, optional): By default a copy of the stored state object
            is returned. Passing ``False`` returns AppDaemon's internal
            object — slightly faster, but only safe for read-only use.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__.

    Returns:
        The requested state value, dictionary of entities, or the entire
        system state when called with no parameters.

    Examples:
        >>> state = self.get_state()
        >>> state = self.get_state("switch")
        >>> state = self.get_state("light.office_1")
        >>> state = self.get_state("light.office_1", attribute="brightness")
        >>> state = self.get_state("light.office_1", attribute="all")
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    return await self.AD.state.get_state(self.name, namespace, entity_id, attribute, default, copy, **kwargs)
@utils.sync_wrapper
async def set_state(self, entity, **kwargs):
    """Updates the state of the specified entity.

    Args:
        entity (str): The fully qualified entity id (including the device type).
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        state: New state value to be set.
        attributes (optional): Entity's attributes to be updated.
        namespace (str, optional): Namespace in which to change the entity's
            state; defaults to the last specified or default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__.
        replace (bool, optional): When ``True`` together with ``attributes``,
            AD replaces the entity's internal attribute register entirely,
            which allows dropping stale attributes. Only sensible for
            internal entity state — plugin-backed entities will overwrite
            the values on their next update.

    Returns:
        A dictionary that represents the new state of the updated entity.

    Examples:
        >>> self.set_state("light.office_1", state="off")
        >>> self.set_state("light.office_1", state = "on", attributes = {"color_name": "red"})
        >>> self.set_state("light.office_1", state="off", namespace ="hass")
    """
    self.logger.debug("set state: %s, %s", entity, kwargs)
    namespace = self._get_namespace(**kwargs)
    await self._check_entity(namespace, entity)
    kwargs.pop("namespace", None)
    return await self.AD.state.set_state(self.name, namespace, entity, **kwargs)
#
# Service
#
@staticmethod
def _check_service(service: str) -> None:
if service.find("/") == -1:
raise ValueError("Invalid Service Name: {}".format(service))
def register_service(
    self, service: str, cb: Callable[[str, str, str, dict], Any], **kwargs: Optional[dict]
) -> None:
    """Registers a service that can be called from other apps, the REST API
    and the Event Stream.

    The registered function becomes available to other apps via
    ``call_service()``, is published as a service in the REST API, and is
    reachable through the ``call_service`` event-stream command. Registering
    services inside a plugin's namespace is not reliable; prefer a
    user-defined namespace or one not tied to a plugin.

    Args:
        service: Name of the service in ``domain/service`` form. The domain
            is created if it does not exist.
        cb: Function to call when the service is requested. May be regular
            or async; note that an async callback runs on AppDaemon's main
            loop, so problems in it can delay AD's core functions.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See
            `namespaces <APPGUIDE.html#namespaces>`__.

    Returns:
        None

    Examples:
        >>> self.register_service("myservices/service1", self.mycallback)
        >>> async def mycallback(self, namespace, domain, service, kwargs):
        >>>     self.log("Service called")
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("register_service: %s/%s, %s", domain, service_name, kwargs)
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    kwargs["__name"] = self.name
    self.AD.services.register_service(namespace, domain, service_name, cb, __async="auto", **kwargs)
def deregister_service(self, service: str, **kwargs: Optional[dict]) -> bool:
    """Deregisters a service previously registered by this app.

    The service becomes unavailable to other apps via ``call_service()``,
    is withdrawn from the REST API, and can no longer be reached through
    the event stream. Only the app that registered the service may
    deregister it.

    Args:
        service: Name of the service, in the format `domain/service`.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See
            `namespaces <APPGUIDE.html#namespaces>`__.

    Returns:
        Bool

    Examples:
        >>> self.deregister_service("myservices/service1")
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("deregister_service: %s/%s, %s", domain, service_name, kwargs)
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    kwargs["__name"] = self.name
    return self.AD.services.deregister_service(namespace, domain, service_name, **kwargs)
def list_services(self, **kwargs: Optional[dict]) -> list:
    """Lists the services available within AD.

    Args:
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace whose services are requested;
            defaults to ``global``, which returns services across all of
            AD. See `namespaces <APPGUIDE.html#namespaces>`__.

    Returns:
        All services within the requested namespace.

    Examples:
        >>> self.list_services(namespace="global")
    """
    self.logger.debug("list_services: %s", kwargs)
    return self.AD.services.list_services(kwargs.get("namespace", "global"))
@utils.sync_wrapper
async def call_service(self, service: str, **kwargs: Optional[dict]) -> Any:
    """Calls a Service within AppDaemon.

    Any service can be called with its required parameters: AD's standard
    services, plugin-provided services, or services registered by apps via
    ``register_service()``. The name before the first separator is the
    ``domain`` and the part after is the service name — `light/turn_on` is
    domain `light`, service `turn_on`. By default the call is
    fire-and-forget; pass ``return_result`` or ``callback`` to obtain the
    outcome.

    Args:
        service (str): The service name.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        **kwargs: Service-specific parameters as keyword/value pairs, e.g.
            ``entity_id = light.office_1``. Most services require an
            ``entity_id``; the developer tools list each service's
            parameters.
        namespace (str, optional): Namespace in which to make the call;
            defaults to the last specified or default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__.
        return_result (str, option): When ``True``, AD waits for and
            returns the result of the call.
        callback: The non-async callback to be executed when complete.

    Returns:
        Result of the `call_service` function if any.

    Examples:
        HASS

        >>> self.call_service("light/turn_on", entity_id = "light.office_lamp", color_name = "red")
        >>> self.call_service("notify/notify", title = "Hello", message = "Hello World")

        MQTT

        >>> call_service("mqtt/subscribe", topic="homeassistant/living_room/light", qos=2)
        >>> call_service("mqtt/publish", topic="homeassistant/living_room/light", payload="on")

        Utility (the `namespace` must be ``appdaemon``, since no app can
        work within that namespace; omitting it raises an error)

        >>> call_service("app/restart", app="notify_app", namespace="appdaemon")
        >>> call_service("app/stop", app="lights_app", namespace="appdaemon")
        >>> call_service("app/reload", namespace="appdaemon")
    """
    self._check_service(service)
    domain, service_name = service.split("/")
    self.logger.debug("call_service: %s/%s, %s", domain, service_name, kwargs)
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    kwargs["__name"] = self.name
    # Note: the kwargs dict itself is passed, not expanded
    return await self.AD.services.call_service(namespace, domain, service_name, kwargs)
@utils.sync_wrapper
async def run_sequence(self, sequence, **kwargs):
    """Runs an AppDaemon Sequence.

    Sequences are series of service calls defined in a valid apps.yaml
    file or supplied inline.

    Args:
        sequence: The sequence name, referring to the correct entry in
            apps.yaml, or a dict containing the actual commands to run.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace in which to run the sequence;
            defaults to the last specified or default namespace. See
            `namespaces <APPGUIDE.html#namespaces>`__.

    Returns:
        A handle that can be used with `cancel_sequence()` to terminate
        the script.

    Examples:
        Run a yaml-defined sequence called "sequence.front_room_scene".

        >>> handle = self.run_sequence("sequence.front_room_scene")

        Run an inline sequence.

        >>> handle = self.run_sequence([{"light/turn_on": {"entity_id": "light.office_1"}}, {"sleep": 5}, {"light.turn_off":
        {"entity_id": "light.office_1"}}])
    """
    namespace = self._get_namespace(**kwargs)
    kwargs.pop("namespace", None)
    self.logger.debug("Calling run_sequence() for %s", self.name)
    return await self.AD.sequences.run_sequence(self.name, namespace, sequence, **kwargs)
@utils.sync_wrapper
async def cancel_sequence(self, handle):
    """Cancels a running AppDaemon Sequence.

    Args:
        handle: The handle returned by the `run_sequence()` call.

    Returns:
        None.

    Examples:
        >>> self.cancel_sequence(handle)
    """
    _name = self.name
    # Fixed copy-pasted debug message: this is cancel_sequence, not run_sequence
    self.logger.debug("Calling cancel_sequence() for %s", self.name)
    await self.AD.sequences.cancel_sequence(_name, handle)
#
# Events
#
@utils.sync_wrapper
async def listen_event(self, callback, event=None, **kwargs):
    """Register a callback for a specific event, or for all events.

    Args:
        callback: Function invoked when the event fires. It must conform to
            the standard Event Callback format documented
            `here <APPGUIDE.html#about-event-callbacks>`__.
        event (optional): Name of the event to subscribe to — a standard
            Home Assistant event such as ``service_registered`` or an
            arbitrary custom event such as ``"MODE_CHANGE"``. If omitted,
            the callback subscribes to all events.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        oneshot (bool, optional): If ``True``, the callback is automatically
            cancelled after the first event that triggers it.
        namespace (str, optional): Namespace to use for the call. The special
            value ``global`` means the callback listens to state updates from
            any plugin. See `namespaces <APPGUIDE.html#namespaces>`__.
        pin (bool, optional): If ``True``, pin the callback to a particular thread.
        pin_thread (int, optional): Which worker-pool thread runs the callback
            (0 - number of threads - 1).
        timeout (int, optional): If supplied, the callback is removed after
            ``timeout`` seconds.
        **kwargs (optional): App-specific keyword/value pairs passed to the
            callback. Keywords that match fields in the event data act as
            filters: the callback fires only when the supplied values match
            the event's values (non-matching keywords are ignored). For
            example, a ``zwave.scene_activated`` event carries ``entity_id``
            and ``scene``; supplying either restricts when the callback fires.
            Examine Home Assistant's logfiles to discover filterable fields.

    Returns:
        A handle that can be used to cancel the callback.

    Examples:
        >>> self.listen_event(self.mode_event, "MODE_CHANGE")
        >>> self.listen_event(self.generic_event, "zwave.scene_activated", scene_id = 3)
        >>> self.listen_event(self.generic_event, "zwave.scene_activated", entity_id = "minimote_31", scene_id = 3)
    """
    target_namespace = self._get_namespace(**kwargs)
    # Strip the routing key before forwarding the remaining kwargs as filters.
    kwargs.pop("namespace", None)
    self.logger.debug("Calling listen_event for %s", self.name)
    return await self.AD.events.add_event_callback(self.name, target_namespace, callback, event, **kwargs)
@utils.sync_wrapper
async def cancel_listen_event(self, handle):
    """Cancel a callback previously registered for an event.

    Args:
        handle: A handle returned from a previous call to ``listen_event()``.

    Returns:
        Boolean.

    Examples:
        >>> self.cancel_listen_event(handle)
    """
    self.logger.debug("Canceling listen_event for %s", self.name)
    result = await self.AD.events.cancel_event_callback(self.name, handle)
    return result
@utils.sync_wrapper
async def info_listen_event(self, handle):
    """Get information on an event callback from its handle.

    Args:
        handle: The handle returned when the ``listen_event()`` call was made.

    Returns:
        The values (service, kwargs) supplied when the callback was initially created.

    Examples:
        >>> service, kwargs = self.info_listen_event(handle)
    """
    self.logger.debug("Calling info_listen_event for %s", self.name)
    info = await self.AD.events.info_event_callback(self.name, handle)
    return info
@utils.sync_wrapper
async def fire_event(self, event, **kwargs):
    """Fire an event on the AppDaemon bus, for apps and plugins.

    Args:
        event: Name of the event — a standard Home Assistant event such as
            ``service_registered`` or an arbitrary custom event such as
            ``"MODE_CHANGE"``.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        namespace (str, optional): Namespace to use for the call. See the
            section on `namespaces <APPGUIDE.html#namespaces>`__.
        **kwargs (optional): Zero or more keyword arguments supplied as
            part of the event payload.

    Returns:
        None.

    Examples:
        >>> self.fire_event("MY_CUSTOM_EVENT", jam="true")
    """
    target_namespace = self._get_namespace(**kwargs)
    # Remaining kwargs become the event payload; drop the routing key.
    kwargs.pop("namespace", None)
    await self.AD.events.fire_event(target_namespace, event, **kwargs)
#
# Time
#
def parse_utc_string(self, utc_string):
    """Convert a UTC date-time string to a POSIX timestamp.

    Args:
        utc_string (str): A string that contains a date and time to convert.

    Returns:
        A POSIX timestamp equivalent to the date and time contained in
        `utc_string`, shifted by the local timezone offset in seconds.
    """
    # Split on every non-digit run; the trailing empty element is dropped.
    parts = [int(field) for field in re.split(r"[^\d]", utc_string)[:-1]]
    return datetime.datetime(*parts).timestamp() + self.get_tz_offset() * 60
def get_tz_offset(self):
    """Return the timezone difference between UTC and Local Time in minutes."""
    delta = self.AD.tz.utcoffset(self.datetime())
    return delta.total_seconds() / 60
@staticmethod
def convert_utc(utc):
    """Parse an ISO 8601 UTC string into a localised `datetime` object.

    Home Assistant provides timestamps of several different sorts, coded as
    `ISO 8601` combined date and time strings in UTC. This function accepts
    one of those strings and converts it to a localised Python `datetime`
    object representing the timestamp.

    Args:
        utc: An `ISO 8601` encoded date and time string in the following
            format: `2016-07-13T14:24:02.040658-04:00`

    Returns:
        A localised Python `datetime` object representing the timestamp.
    """
    return iso8601.parse_date(utc)
@utils.sync_wrapper
async def sun_up(self):
    """Determine whether the sun is currently up.

    Returns:
        bool: ``True`` if the sun is up, ``False`` otherwise.

    Examples:
        >>> if self.sun_up():
        >>>    #do something
    """
    is_up = await self.AD.sched.sun_up()
    return is_up
@utils.sync_wrapper
async def sun_down(self):
    """Determine whether the sun is currently down.

    Returns:
        bool: ``True`` if the sun is down, ``False`` otherwise.

    Examples:
        >>> if self.sun_down():
        >>>    #do something
    """
    is_down = await self.AD.sched.sun_down()
    return is_down
@utils.sync_wrapper
async def parse_time(self, time_str, name=None, aware=False):
    """Create a `time` object from its string representation.

    Takes a string representation of a time, or a sunrise/sunset offset,
    and converts it to a ``datetime.time`` object.

    Args:
        time_str (str): The time, in one of these formats:

            a. ``HH:MM:SS`` - Hours, Minutes and Seconds, 24 hour format.
            b. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise
               or sunset with an optional positive or negative offset.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.
        aware (bool, optional): If ``True`` the created time object will be
            aware of timezone.

    Returns:
        A `time` object, representing the time given in the `time_str` argument.

    Examples:
        >>> self.parse_time("17:30:00")
        17:30:00
        >>> time = self.parse_time("sunrise")
        04:33:17
        >>> time = self.parse_time("sunset + 00:30:00")
        19:18:48
        >>> time = self.parse_time("sunrise + 01:00:00")
        05:33:17
    """
    parsed = await self.AD.sched.parse_time(time_str, name, aware)
    return parsed
@utils.sync_wrapper
async def parse_datetime(self, time_str, name=None, aware=False):
    """Create a `datetime` object from its string representation.

    Takes a string representation of a date and time, or a sunrise/sunset
    offset, and converts it to a `datetime` object.

    Args:
        time_str (str): The datetime, in one of these formats:

            a. ``YY-MM-DD-HH:MM:SS`` - full date and time, 24 hour format.
            b. ``HH:MM:SS`` - time only, 24 hour format (the resulting
               datetime gets today's date).
            c. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise
               or sunset with an optional positive or negative offset.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.
        aware (bool, optional): If ``True`` the created datetime object will
            be aware of timezone.

    Returns:
        A `datetime` object, representing the time and date given in the
        `time_str` argument.

    Examples:
        >>> self.parse_datetime("2018-08-09 17:30:00")
        2018-08-09 17:30:00
        >>> self.parse_datetime("17:30:00")
        2019-08-15 17:30:00
        >>> self.parse_datetime("sunrise")
        2019-08-16 05:33:17
        >>> self.parse_datetime("sunset + 00:30:00")
        2019-08-16 19:18:48
        >>> self.parse_datetime("sunrise + 01:00:00")
        2019-08-16 06:33:17
    """
    parsed = await self.AD.sched.parse_datetime(time_str, name, aware)
    return parsed
@utils.sync_wrapper
async def get_now(self):
    """Return the current Local Date and Time.

    Examples:
        >>> self.get_now()
        2019-08-16 21:17:41.098813+00:00
    """
    current = await self.AD.sched.get_now()
    # Scheduler time is localised to the configured timezone before returning.
    return current.astimezone(self.AD.tz)
@utils.sync_wrapper
async def get_now_ts(self):
    """Return the current Local Timestamp.

    Examples:
        >>> self.get_now_ts()
        1565990318.728324
    """
    ts = await self.AD.sched.get_now_ts()
    return ts
@utils.sync_wrapper
async def now_is_between(self, start_time, end_time, name=None):
    """Determine whether the current time is within the given start and end times.

    Takes two string representations of a ``time`` (or ``sunrise``/``sunset``
    offsets) and returns ``true`` if the current time is between them. The
    implementation correctly handles transitions across midnight.

    Args:
        start_time (str): A string representation of the start time.
        end_time (str): A string representation of the end time.
        name (str, optional): Name of the calling app or module, used only
            for logging purposes.

    Returns:
        bool: ``True`` if the current time is within the specified start and
        end times, ``False`` otherwise.

    Notes:
        ``start_time`` and ``end_time`` follow one of these formats:

        a. ``HH:MM:SS`` - Hours, Minutes and Seconds, 24 hour format.
        b. ``sunrise|sunset [+|- HH:MM:SS]`` - time of the next sunrise or
           sunset with an optional positive or negative offset.

    Examples:
        >>> if self.now_is_between("17:30:00", "08:00:00"):
        >>>    #do something
        >>> if self.now_is_between("sunset - 00:45:00", "sunrise + 00:45:00"):
        >>>    #do something
    """
    result = await self.AD.sched.now_is_between(start_time, end_time, name)
    return result
@utils.sync_wrapper
async def sunrise(self, aware=False):
    """Return a `datetime` object representing the next time Sunrise will occur.

    Args:
        aware (bool, optional): Specifies if the created datetime object will
            be `aware` of timezone or `not`.

    Examples:
        >>> self.sunrise()
        2019-08-16 05:33:17
    """
    next_rise = await self.AD.sched.sunrise(aware)
    return next_rise
@utils.sync_wrapper
async def sunset(self, aware=False):
    """Return a `datetime` object representing the next time Sunset will occur.

    Args:
        aware (bool, optional): Specifies if the created datetime object will
            be `aware` of timezone or `not`.

    Examples:
        >>> self.sunset()
        2019-08-16 19:48:48
    """
    next_set = await self.AD.sched.sunset(aware)
    return next_set
@utils.sync_wrapper
async def time(self):
    """Return a localised `time` object representing the current Local Time.

    Use this in preference to the standard Python ways to discover the
    current time, especially when using the "Time Travel" feature for testing.

    Examples:
        >>> self.time()
        20:15:31.295751
    """
    current = await self.AD.sched.get_now()
    localised = current.astimezone(self.AD.tz)
    return localised.time()
@utils.sync_wrapper
async def datetime(self, aware=False):
    """Return a `datetime` object representing the current Local Date and Time.

    Use this in preference to the standard Python ways to discover the
    current datetime, especially when using the "Time Travel" feature for testing.

    Args:
        aware (bool, optional): Specifies if the created datetime object will
            be `aware` of timezone or `not`.

    Examples:
        >>> self.datetime()
        2019-08-15 20:15:55.549379
    """
    if aware is True:
        current = await self.AD.sched.get_now()
        return current.astimezone(self.AD.tz)
    # Naive variant: scheduler time without timezone information attached.
    return await self.AD.sched.get_now_naive()
@utils.sync_wrapper
async def date(self):
    """Return a localised `date` object representing the current Local Date.

    Use this in preference to the standard Python ways to discover the
    current date, especially when using the "Time Travel" feature for testing.

    Examples:
        >>> self.date()
        2019-08-15
    """
    current = await self.AD.sched.get_now()
    localised = current.astimezone(self.AD.tz)
    return localised.date()
def get_timezone(self):
    """Return the currently configured time zone."""
    tz = self.AD.time_zone
    return tz
#
# Scheduler
#
@utils.sync_wrapper
async def timer_running(self, handle):
    """Check whether a previously created timer is still running.

    Args:
        handle: A handle value returned from the original call to create the timer.

    Returns:
        Boolean.

    Examples:
        >>> self.timer_running(handle)
    """
    self.logger.debug("Checking timer with handle %s for %s", handle, self.name)
    # NOTE(review): unlike cancel_timer(), the scheduler call here is not
    # awaited in the original — presumably it is a plain synchronous lookup.
    return self.AD.sched.timer_running(self.name, handle)
@utils.sync_wrapper
async def cancel_timer(self, handle):
    """Cancel a previously created timer.

    Args:
        handle: A handle value returned from the original call to create the timer.

    Returns:
        Boolean.

    Examples:
        >>> self.cancel_timer(handle)
    """
    self.logger.debug("Canceling timer with handle %s for %s", handle, self.name)
    cancelled = await self.AD.sched.cancel_timer(self.name, handle)
    return cancelled
@utils.sync_wrapper
async def info_timer(self, handle):
    """Get information on a scheduler event from its handle.

    Args:
        handle: The handle returned when the scheduler call was made.

    Returns:
        `time` - datetime object representing the next time the callback will be fired

        `interval` - repeat interval if applicable, `0` otherwise.

        `kwargs` - the values supplied when the callback was initially created.

        or ``None`` - if handle is invalid or timer no longer exists.

    Examples:
        >>> time, interval, kwargs = self.info_timer(handle)
    """
    info = await self.AD.sched.info_timer(handle, self.name)
    return info
@utils.sync_wrapper
async def run_in(self, callback, delay, **kwargs):
    """Run the callback after a defined number of seconds.

    Used to add a delay — for instance a 60-second delay before a light is
    turned off after being triggered by a motion detector. Always use this
    instead of ``time.sleep()``.

    Args:
        callback: Function to be invoked when the delay expires. It must
            conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        delay (int): Delay, in seconds before the callback is invoked.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

    Examples:
        >>> self.handle = self.run_in(self.run_in_c, 10)
        >>> self.handle = self.run_in(self.run_in_c, 5, title = "run_in5")
    """
    self.logger.debug("Registering run_in in %s seconds for %s", delay, self.name)
    # delay frequently arrives as a string (pass-through from config files),
    # so coerce to int before building the timedelta.
    when = await self.get_now() + timedelta(seconds=int(delay))
    return await self.AD.sched.insert_schedule(self.name, when, callback, False, None, **kwargs)
@utils.sync_wrapper
async def run_once(self, callback, start, **kwargs):
    """Run the callback once, at the specified time of day.

    Args:
        callback: Function to be invoked at the specified time of day.
            It must conform to the standard Scheduler Callback format
            documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: Either a Python ``time`` object or a ``parse_time()`` formatted
            string that specifies when the callback will occur. If the time
            specified is in the past, the callback will occur the ``next day``
            at the specified time.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is neither a ``datetime.time`` nor a string.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

    Examples:
        Run at 4pm today, or 4pm tomorrow if it is already after 4pm.

        >>> runtime = datetime.time(16, 0, 0)
        >>> handle = self.run_once(self.run_once_c, runtime)

        Run today at 10:30 using the `parse_time()` function.

        >>> handle = self.run_once(self.run_once_c, "10:30:00")

        Run at sunset.

        >>> handle = self.run_once(self.run_once_c, "sunset")

        Run an hour after sunrise.

        >>> handle = self.run_once(self.run_once_c, "sunrise + 01:00:00")
    """
    # isinstance instead of type() == to follow Python idiom and accept subclasses.
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"].time()
    else:
        raise ValueError("Invalid type for start")
    name = self.name
    self.logger.debug("Registering run_once at %s for %s", when, name)
    now = await self.get_now()
    today = now.date()
    event = datetime.datetime.combine(today, when)
    aware_event = self.AD.sched.convert_naive(event)
    if aware_event < now:
        # Requested time already passed today; schedule for tomorrow.
        aware_event = aware_event + datetime.timedelta(days=1)
    handle = await self.AD.sched.insert_schedule(name, aware_event, callback, False, None, **kwargs)
    return handle
@utils.sync_wrapper
async def run_at(self, callback, start, **kwargs):
    """Run the callback once, at the specified date and time.

    Args:
        callback: Function to be invoked at the specified time.
            It must conform to the standard Scheduler Callback format
            documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: Either a Python ``datetime`` object or a ``parse_time()``
            formatted string that specifies when the callback will occur.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is neither a ``datetime.datetime`` nor a
            string, or if the specified time is in the past.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

        The ``run_at()`` function will ``raise`` an exception if the
        specified time is in the ``past``.

    Examples:
        Run at 4pm today.

        >>> runtime = datetime.time(16, 0, 0)
        >>> today = datetime.date.today()
        >>> event = datetime.datetime.combine(today, runtime)
        >>> handle = self.run_at(self.run_at_c, event)

        Run today at 10:30 using the `parse_time()` function.

        >>> handle = self.run_at(self.run_at_c, "10:30:00")

        Run on a specific date and time.

        >>> handle = self.run_at(self.run_at_c, "2018-12-11 10:30:00")

        Run at the next sunset.

        >>> handle = self.run_at(self.run_at_c, "sunset")

        Run an hour after the next sunrise.

        >>> handle = self.run_at(self.run_at_c, "sunrise + 01:00:00")
    """
    # isinstance instead of type() == to follow Python idiom and accept subclasses.
    if isinstance(start, datetime.datetime):
        when = start
    elif isinstance(start, str):
        start_time_obj = await self.AD.sched._parse_time(start, self.name)
        when = start_time_obj["datetime"]
    else:
        raise ValueError("Invalid type for start")
    aware_when = self.AD.sched.convert_naive(when)
    name = self.name
    self.logger.debug("Registering run_at at %s for %s", when, name)
    now = await self.get_now()
    if aware_when < now:
        raise ValueError("{}: run_at() Start time must be " "in the future".format(self.name))
    handle = await self.AD.sched.insert_schedule(name, aware_when, callback, False, None, **kwargs)
    return handle
@utils.sync_wrapper
async def run_daily(self, callback, start, **kwargs):
    """Run the callback at the same time every day.

    Args:
        callback: Function to be invoked every day at the specified time.
            It must conform to the standard Scheduler Callback format
            documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: Either a Python ``time`` object or a ``parse_time()`` formatted
            string that specifies when the callback will occur. If the time
            specified is in the past, the callback will occur the ``next day``
            at the specified time. When specifying sunrise or sunset relative
            times using the ``parse_datetime()`` format, the callback time is
            adjusted every day to track the actual sunrise/sunset.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Raises:
        ValueError: If ``start`` is neither a ``datetime.time`` nor a string.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

    Examples:
        Run daily at 7pm.

        >>> runtime = datetime.time(19, 0, 0)
        >>> self.run_daily(self.run_daily_c, runtime)

        Run at 10:30 every day using the `parse_time()` function.

        >>> handle = self.run_daily(self.run_daily_c, "10:30:00")

        Run every day at sunrise.

        >>> handle = self.run_daily(self.run_daily_c, "sunrise")

        Run every day an hour after sunset.

        >>> handle = self.run_daily(self.run_daily_c, "sunset + 01:00:00")
    """
    info = None
    when = None
    # isinstance instead of type() == to follow Python idiom and accept subclasses.
    if isinstance(start, datetime.time):
        when = start
    elif isinstance(start, str):
        info = await self.AD.sched._parse_time(start, self.name)
    else:
        raise ValueError("Invalid type for start")
    if info is None or info["sun"] is None:
        # Fixed-time daily schedule: anchor at the next occurrence of `when`
        # and repeat every 24 hours.
        if when is None:
            when = info["datetime"].time()
        aware_now = await self.get_now()
        now = self.AD.sched.make_naive(aware_now)
        today = now.date()
        event = datetime.datetime.combine(today, when)
        if event < now:
            event = event + datetime.timedelta(days=1)
        handle = await self.run_every(callback, event, 24 * 60 * 60, **kwargs)
    elif info["sun"] == "sunrise":
        # Sun-relative schedules delegate so the time tracks daily sun movement.
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunrise(callback, **kwargs)
    else:
        kwargs["offset"] = info["offset"]
        handle = await self.run_at_sunset(callback, **kwargs)
    return handle
@utils.sync_wrapper
async def run_hourly(self, callback, start, **kwargs):
    """Run the callback at the same time every hour.

    Args:
        callback: Function to be invoked every hour at the specified time.
            It must conform to the standard Scheduler Callback format
            documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object that specifies when the callback will
            occur; the hour component is ignored. If the time specified is in
            the past, the callback will occur the ``next hour`` at the
            specified time. If ``None``, the callback will start an hour from
            the time that ``run_hourly()`` was executed.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

    Examples:
        Run every hour, on the hour.

        >>> runtime = datetime.time(0, 0, 0)
        >>> self.run_hourly(self.run_hourly_c, runtime)
    """
    now = await self.get_now()
    if start is None:
        # No anchor supplied: first run is exactly one hour from now.
        event = now + datetime.timedelta(hours=1)
    else:
        # The replace()/rollover must only run when `start` is supplied;
        # doing it unconditionally would raise AttributeError for start=None.
        event = now.replace(minute=start.minute, second=start.second)
        if event < now:
            event = event + datetime.timedelta(hours=1)
    handle = await self.run_every(callback, event, 60 * 60, **kwargs)
    return handle
@utils.sync_wrapper
async def run_minutely(self, callback, start, **kwargs):
    """Run the callback at the same time every minute.

    Args:
        callback: Function to be invoked every minute.
            It must conform to the standard Scheduler Callback format
            documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``time`` object that specifies when the callback will
            occur; the hour and minute components are ignored. If the time
            specified is in the past, the callback will occur the
            ``next minute`` at the specified time. If ``None``, the callback
            will start a minute from the time that ``run_minutely()`` was
            executed.
        **kwargs (optional): Zero or more keyword arguments.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If True, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

    Examples:
        Run every minute on the minute.

        >>> time = datetime.time(0, 0, 0)
        >>> self.run_minutely(self.run_minutely_c, time)
    """
    now = await self.get_now()
    if start is None:
        # No anchor supplied: first run is exactly one minute from now.
        event = now + datetime.timedelta(minutes=1)
    else:
        # The replace()/rollover must only run when `start` is supplied;
        # doing it unconditionally would raise AttributeError for start=None.
        event = now.replace(second=start.second)
        if event < now:
            event = event + datetime.timedelta(minutes=1)
    handle = await self.run_every(callback, event, 60, **kwargs)
    return handle
@utils.sync_wrapper
async def run_every(self, callback, start, interval, **kwargs):
    """Run the callback repeatedly at a fixed interval, starting at a specific time.

    Args:
        callback: Function to be invoked when the time interval is reached.
            It must conform to the standard Scheduler Callback format
            documented `here <APPGUIDE.html#about-schedule-callbacks>`__.
        start: A Python ``datetime`` object that specifies when the initial
            callback will occur, or the string ``"now"`` with an optional
            ``+<seconds>`` offset. If given in the past, execution begins at
            the next interval boundary.
        interval: Frequency (expressed in seconds) in which the callback
            should be executed.
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Keyword Args:
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

    Examples:
        >>> self.run_every(self.run_every_c, time, 17 * 60)
        >>> self.run_every(self.run_every_c, "now", 10 * 60)
        >>> self.run_every(self.run_every_c, "now+5", 5 * 60)
    """
    now = await self.get_now()
    if isinstance(start, str) and "now" in start:
        # "now" means immediate start; "now+N" adds N seconds.
        offset_seconds = int(re.findall(r"\d+", start)[0]) if "+" in start else 0
        aware_start = await self.get_now()
        aware_start = aware_start + datetime.timedelta(seconds=offset_seconds)
    else:
        aware_start = self.AD.sched.convert_naive(start)
    if aware_start < now:
        # Past start times roll forward to the next interval boundary.
        aware_start = now + datetime.timedelta(seconds=interval)
    self.logger.debug(
        "Registering run_every starting %s in %ss intervals for %s", aware_start, interval, self.name,
    )
    return await self.AD.sched.insert_schedule(
        self.name, aware_start, callback, True, None, interval=interval, **kwargs
    )
@utils.sync_wrapper
async def _schedule_sun(self, name, type_, callback, **kwargs):
    """Insert a repeating schedule anchored to the next sunrise or sunset.

    Args:
        name: Name of the owning app, passed through to the scheduler.
        type_: Either ``"next_rising"`` (sunrise) or anything else (sunset).
        callback: Scheduler callback to invoke at the sun event.
        **kwargs: Passed through to the scheduler (offset, random range, ...).

    Returns:
        A handle that can be used to cancel the timer.
    """
    event = self.AD.sched.next_sunrise() if type_ == "next_rising" else self.AD.sched.next_sunset()
    return await self.AD.sched.insert_schedule(name, event, callback, True, type_, **kwargs)
@utils.sync_wrapper
async def run_at_sunset(self, callback, **kwargs):
    """Run a callback every day at or around sunset.

    Args:
        callback: Function to be invoked at or around sunset. It must conform
            to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        **kwargs: Arbitrary keyword parameters to be provided to the callback
            function when it is invoked.

    Keyword Args:
        offset (int, optional): Seconds by which the callback is delayed after
            sunset; a negative value fires before sunset. Cannot be combined
            with ``random_start`` or ``random_end``.
        random_start (int): Start of range of the random time.
        random_end (int): End of range of the random time.
        pin (bool, optional): If ``True``, the callback will be pinned to a particular thread.
        pin_thread (int, optional): Specify which thread from the worker pool
            the callback will be run by (0 - number of threads -1).

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        they can be negative (random offset before an event) or positive
        (random offset after an event).

    Examples:
        >>> self.run_at_sunset(self.sun, offset = datetime.timedelta(minutes = -45).total_seconds())
        >>> self.run_at_sunset(self.sun, offset = 30 * 60)
        >>> self.run_at_sunset(self.sun, random_start = -60*60, random_end = 60*60)
        >>> self.run_at_sunset(self.sun, random_start = -60*60, random_end = 30*60)
    """
    self.logger.debug("Registering run_at_sunset with kwargs = %s for %s", kwargs, self.name)
    return await self._schedule_sun(self.name, "next_setting", callback, **kwargs)
@utils.sync_wrapper
async def run_at_sunrise(self, callback, **kwargs):
    """Schedule *callback* to run every day at (or offset from) sunrise.

    Args:
        callback: Scheduler-style callback invoked around sunrise. It must
            conform to the standard Scheduler Callback format documented
            `here <APPGUIDE.html#about-schedule-callbacks>`__.
        **kwargs: Arbitrary keyword parameters forwarded to the callback
            when it is invoked.

    Keyword Args:
        offset (int, optional): Seconds to delay the callback after sunrise;
            a negative value fires before sunrise. Cannot be combined with
            ``random_start`` or ``random_end``.
        random_start (int): Start of the random-offset window.
        random_end (int): End of the random-offset window.
        pin (bool, optional): If ``True``, pin the callback to a particular thread.
        pin_thread (int, optional): Worker-pool thread to run the callback on
            (0 - number of threads - 1).

    Returns:
        A handle that can be used to cancel the timer.

    Notes:
        ``random_start`` must always be numerically lower than ``random_end``;
        both may be negative (before the event) or positive (after it).

    Examples:
        45 minutes before sunrise, using timedelta:

        >>> self.run_at_sunrise(self.sun, offset = datetime.timedelta(minutes = -45).total_seconds())

        Doing the math yourself:

        >>> self.run_at_sunrise(self.sun, offset = 30 * 60)

        At a random time +/- 60 minutes from sunrise:

        >>> self.run_at_sunrise(self.sun, random_start = -60*60, random_end = 60*60)

        At a random time between 30 and 60 minutes before sunrise:

        >>> self.run_at_sunrise(self.sun, random_start = -60*60, random_end = 30*60)
    """
    app_name = self.name
    self.logger.debug("Registering run_at_sunrise with kwargs = %s for %s", kwargs, app_name)
    # Delegate to the shared sun-event scheduler with the "rising" event.
    return await self._schedule_sun(app_name, "next_rising", callback, **kwargs)
#
# Dashboard
#
def dash_navigate(self, target, timeout=-1, ret=None, sticky=0, deviceid=None, dashid=None):
    """Force all connected Dashboards to navigate to a new URL.

    Args:
        target (str): Name of the new Dashboard to navigate to
            (e.g., ``/SensorPanel``). Note that this value is not a URL.
        timeout (int): Seconds to stay on the new dashboard before returning
            to the original. Optional; if omitted the navigation is
            permanent. A click or touch on the new panel before the timeout
            expires cancels the timeout.
        ret (str): Dashboard to return to after the timeout has elapsed.
        sticky (int): Whether to return to the original dashboard after it
            has been clicked on. With the default (``sticky=0``) a click
            keeps the new dashboard, otherwise it returns to the original.
            A non-zero value (e.g. ``sticky=5``) extends the stay by that
            many seconds per click, returning after ``timeout`` seconds of
            inactivity.
        deviceid (str): If set, only the device with this deviceid navigates.
        dashid (str): If set, all devices whose current dashboard title
            contains this substring navigate (e.g. ``"kitchen"`` matches
            "kitchen lights", "ipad - kitchen", ...).

    Returns:
        None.

    Examples:
        Switch to AlarmStatus Panel then return after 10 seconds:

        >>> self.dash_navigate("/AlarmStatus", timeout=10)

        Switch to Locks Panel then return to Main panel after 10 seconds:

        >>> self.dash_navigate("/Locks", timeout=10, ret="/SensorPanel")
    """
    payload = {"command": "navigate", "target": target, "sticky": sticky}

    # Only include the optional keys the caller actually supplied.
    for key, value, supplied in (
        ("timeout", timeout, timeout != -1),
        ("return", ret, ret is not None),
        ("deviceid", deviceid, deviceid is not None),
        ("dashid", dashid, dashid is not None),
    ):
        if supplied:
            payload[key] = value

    self.fire_event("ad_dashboard", **payload)
#
# Async
#
async def run_in_executor(self, func, *args, **kwargs):
    """Run a synchronous function on AppDaemon's executor threads and await it.

    The function is actually awaited during execution, so the calling
    coroutine resumes once *func* has finished.

    Args:
        func: The synchronous function to be executed.
        *args (optional): Positional arguments forwarded to *func*.
        **kwargs (optional): Keyword arguments forwarded to *func*.

    Returns:
        The awaited result of the executor call (presumably *func*'s return
        value — confirm against ``utils.run_in_executor``).

    Examples:
        >>> await self.run_in_executor(self.run_request)
    """
    return await utils.run_in_executor(self, func, *args, **kwargs)
def submit_to_executor(self, func, *args, **kwargs):
    """Submit a sync function to the executor pool without waiting for it.

    Submits and immediately returns, which is useful for long-running work
    that should not hold up the thread for other callbacks.

    Args:
        func: The function to be executed.
        *args (optional): Positional arguments forwarded to *func*.
        **kwargs (optional): Keyword arguments forwarded to *func*. May
            include ``callback``, which is run when the function completes.

    Returns:
        A Future, which can be cancelled by calling f.cancel().

    Examples:
        >>> f = self.submit_to_executor(self.run_request, callback=self.callback)
        >>>
        >>> def callback(self, kwargs):
    """
    done_callback = kwargs.pop("callback", None)

    # Build a fake scheduler dispatch record so the user callback runs
    # through the normal worker machinery when the function completes.
    sched_data = {
        "id": uuid.uuid4().hex,
        "name": self.name,
        "objectid": self.AD.app_management.objects[self.name]["id"],
        "type": "scheduler",
        "function": done_callback,
        "pin_app": self.get_app_pin(),
        "pin_thread": self.get_pin_thread(),
    }

    def _on_done(fut):
        try:
            # TODO: use our own callback type instead of borrowing
            # from scheduler
            sched_data["kwargs"] = {"result": fut.result()}
            self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))
        except Exception as exc:
            self.error(exc, level="ERROR")

    future = self.AD.executor.submit(func, *args, **kwargs)

    if done_callback is not None:
        self.logger.debug("Adding add_done_callback for future %s for %s", future, self.name)
        future.add_done_callback(_on_done)

    self.AD.futures.add_future(self.name, future)
    return future
@utils.sync_wrapper
async def create_task(self, coro, callback=None, **kwargs):
    """Schedule a coroutine to be executed on the event loop.

    Args:
        coro: The coroutine object (`not coroutine function`) to be executed.
        callback: The non-async callback to be executed when complete.
        **kwargs (optional): Any additional keyword arguments to send the callback.

    Returns:
        A Future, which can be cancelled by calling f.cancel().

    Examples:
        >>> f = self.create_task(asyncio.sleep(3), callback=self.coro_callback)
        >>>
        >>> def coro_callback(self, kwargs):
    """
    # Build a fake scheduler dispatch record so the user callback runs
    # through the normal worker machinery when the coroutine completes.
    sched_data = {
        "id": uuid.uuid4().hex,
        "name": self.name,
        "objectid": self.AD.app_management.objects[self.name]["id"],
        "type": "scheduler",
        "function": callback,
        "pin_app": await self.get_app_pin(),
        "pin_thread": await self.get_pin_thread(),
    }

    def _on_done(fut):
        try:
            # TODO: use our own callback type instead of borrowing
            # from scheduler
            kwargs["result"] = fut.result()
            sched_data["kwargs"] = kwargs
            self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))
        except asyncio.CancelledError:
            # Cancelled tasks simply skip the user callback.
            pass

    task = asyncio.create_task(coro)

    if callback is not None:
        self.logger.debug("Adding add_done_callback for future %s for %s", task, self.name)
        task.add_done_callback(_on_done)

    self.AD.futures.add_future(self.name, task)
    return task
@staticmethod
async def sleep(delay, result=None):
    """Pause execution for a certain time span
    (not available in sync apps)

    Args:
        delay (float): Number of seconds to pause.
        result (optional): Result to return upon delay completion.

    Returns:
        Result or `None`.

    Raises:
        RuntimeError: If called from a sync (non-async) context.

    Notes:
        This function is not available in sync apps.

    Examples:
        >>> async def myfunction(self):
        >>>     await self.sleep(5)
    """
    # get_running_loop() raises RuntimeError when no loop is running in the
    # current thread. The previous get_event_loop() check could succeed (and
    # even create a new loop) outside async context, making this guard
    # unreliable, and is deprecated for this use since Python 3.10.
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        raise RuntimeError("The sleep method is for use in ASYNC methods only") from None

    return await asyncio.sleep(delay, result=result)
#
# Other
#
def run_in_thread(self, callback, thread, **kwargs):
    """Schedule *callback* to run on a specific worker thread.

    Implemented as an immediate (0-second) schedule with pinning disabled
    and an explicit thread selection.

    Args:
        callback: Function to be run on the chosen thread.
        thread (int): Thread number (0 - number of threads).
        **kwargs: Arbitrary keyword parameters forwarded to the callback
            when it is invoked.

    Returns:
        None.

    Examples:
        >>> self.run_in_thread(my_callback, 8)
    """
    self.run_in(callback, 0, pin=False, pin_thread=thread, **kwargs)
@utils.sync_wrapper
async def get_thread_info(self):
    """Fetch information on AppDaemon worker threads.

    Returns:
        A dictionary with the information the threading subsystem reports
        for all AppDaemon worker threads.

    Examples:
        >>> thread_info = self.get_thread_info()
    """
    return await self.AD.threading.get_thread_info()
@utils.sync_wrapper
async def get_scheduler_entries(self):
    """Fetch information on AppDaemon scheduler entries.

    Returns:
        A dictionary with the information the scheduler reports for all of
        its entries.

    Examples:
        >>> schedule = self.get_scheduler_entries()
    """
    return await self.AD.sched.get_scheduler_entries()
@utils.sync_wrapper
async def get_callback_entries(self):
    """Fetch information on AppDaemon callback entries.

    Returns:
        A dictionary with all entries in the AppDaemon state and event
        callback table.

    Examples:
        >>> callbacks = self.get_callback_entries()
    """
    return await self.AD.callbacks.get_callback_entries()
@utils.sync_wrapper
async def depends_on_module(self, *modules):
    """Register a global_modules dependency for an app.

    Args:
        *modules: One or more modules to register a dependency on.

    Returns:
        None.

    Examples:
        >>> import somemodule
        >>> import anothermodule
        >>> # later
        >>> self.depends_on_module(somemodule, anothermodule)
    """
    return await self.AD.app_management.register_module_dependency(self.name, *modules)
<|code_end|>
appdaemon/services.py
<|code_start|>import threading
import traceback
import asyncio
from copy import deepcopy
from typing import Any, Optional, Callable, Awaitable
from appdaemon.appdaemon import AppDaemon
from appdaemon.exceptions import NamespaceException
import appdaemon.utils as utils
class Services:
    """Registry and dispatcher for AppDaemon services.

    Services live in a three-level mapping
    ``{namespace: {domain: {service: {"callback": ..., **extras}}}}``,
    guarded by a re-entrant lock. Apps that register services are tracked in
    ``app_registered_services`` (as ``"namespace:domain:service"`` strings)
    so everything an app registered can be cleaned up when it shuts down.
    """

    def __init__(self, ad: "AppDaemon"):
        # "AppDaemon" is a string (forward) annotation so the class body can
        # be evaluated without resolving the appdaemon package at def time.
        self.AD = ad
        self.services = {}
        self.services_lock = threading.RLock()
        self.app_registered_services = {}
        self.logger = ad.logging.get_child("_services")

    def register_service(
        self, namespace: str, domain: str, service: str, callback: Callable, **kwargs: Optional[dict]
    ) -> None:
        """Register *callback* under ``namespace/domain/service``.

        Keyword Args:
            __name: Name of the registering app (used for ownership tracking).
            __silent: If True, suppress the ``service_registered`` event.

        Raises:
            NamespaceException: If ``__name`` is given and *namespace* does
                not exist in the state machine.
            ValueError: If *callback* is not callable.
        """
        self.logger.debug(
            "register_service called: %s.%s.%s -> %s", namespace, domain, service, callback,
        )

        __silent = kwargs.pop("__silent", False)

        with self.services_lock:
            name = kwargs.get("__name")
            # first we confirm if the namespace exists
            if name and namespace not in self.AD.state.state:
                raise NamespaceException(f"Namespace '{namespace}', doesn't exist")

            elif not callable(callback):
                raise ValueError(f"The given callback {callback} is not a callable function")

            if namespace not in self.services:
                self.services[namespace] = {}

            if domain not in self.services[namespace]:
                self.services[namespace][domain] = {}

            if service in self.services[namespace][domain]:
                # there was a service already registered before
                # so if a different app, we ask to deregister first
                service_app = self.services[namespace][domain][service].get("__name")
                if service_app and service_app != name:
                    self.logger.warning(
                        f"This service '{domain}/{service}' already registered to a different app '{service_app}'. Do deregister from app first"
                    )
                    return

            self.services[namespace][domain][service] = {"callback": callback, **kwargs}

            if __silent is False:
                data = {
                    "event_type": "service_registered",
                    "data": {"namespace": namespace, "domain": domain, "service": service},
                }
                self.AD.loop.create_task(self.AD.events.process_event(namespace, data))

            if name:
                if name not in self.app_registered_services:
                    self.app_registered_services[name] = set()

                self.app_registered_services[name].add(f"{namespace}:{domain}:{service}")

    def deregister_service(self, namespace: str, domain: str, service: str, **kwargs: dict) -> bool:
        """Deregister a service previously registered by an app.

        Also prunes now-empty domain/namespace containers and the app's
        ownership record, and broadcasts a ``service_deregistered`` event.

        Keyword Args:
            __name: Name of the owning app (required).

        Returns:
            True on success.

        Raises:
            ValueError: If ``__name`` is missing, the app has no registered
                services, or it does not own this particular service.
        """
        self.logger.debug(
            "deregister_service called: %s:%s:%s %s", namespace, domain, service, kwargs,
        )
        name = kwargs.get("__name")
        if not name:
            raise ValueError("App must be given to deregister service call")

        if name not in self.app_registered_services:
            # BUG FIX: this message was previously passed printf-style as
            # ValueError("...%s...", name); ValueError does not format its
            # arguments, so the app name was never substituted.
            raise ValueError(f"The given App {name} has no services registered")

        app_service = f"{namespace}:{domain}:{service}"

        if app_service not in self.app_registered_services[name]:
            # BUG FIX: same unformatted-%s bug as above; wording also repaired.
            raise ValueError(f"The given App {name} doesn't have the given service registered to it")

        # if it gets here, then time to deregister
        with self.services_lock:
            # it belongs to the app
            del self.services[namespace][domain][service]

            data = {
                "event_type": "service_deregistered",
                "data": {"namespace": namespace, "domain": domain, "service": service, "app": name},
            }
            self.AD.loop.create_task(self.AD.events.process_event(namespace, data))

            # now check if that domain is empty
            # if it is, remove it also
            if self.services[namespace][domain] == {}:
                # its empty
                del self.services[namespace][domain]

            # now check if that namespace is empty
            # if it is, remove it also
            if self.services[namespace] == {}:
                # its empty
                del self.services[namespace]

            self.app_registered_services[name].remove(app_service)

            if not self.app_registered_services[name]:
                del self.app_registered_services[name]

            return True

    def clear_services(self, name: str) -> None:
        """Deregister every service owned by app *name* (no-op if none)."""
        if name not in self.app_registered_services:
            return

        # Iterate over a copy: deregister_service mutates the underlying set.
        app_services = deepcopy(self.app_registered_services[name])

        for app_service in app_services:
            namespace, domain, service = app_service.split(":")
            self.deregister_service(namespace, domain, service, __name=name)

    def list_services(self, ns: str = "global") -> list:
        """List all registered services.

        Args:
            ns: Restrict to this namespace; ``"global"`` means all namespaces.

        Returns:
            A list of ``{"namespace", "domain", "service"}`` dicts.
        """
        result = []
        with self.services_lock:
            for namespace in self.services:
                if ns != "global" and namespace != ns:
                    continue

                for domain in self.services[namespace]:
                    for service in self.services[namespace][domain]:
                        result.append({"namespace": namespace, "domain": domain, "service": service})

        return result

    async def call_service(self, namespace: str, domain: str, service: str, data: dict) -> Any:
        """Invoke the service registered as ``namespace/domain/service``.

        Special keys consumed from *data*:
            __name: calling app's name (for logging / result callbacks).
            namespace: override for the namespace the callback operates on.
            return_result: if True, await and return the service result.
            callback: app callback that receives the result asynchronously.

        Returns:
            The service result when ``return_result`` is True, otherwise None
            (unknown namespace/domain/service also returns None, with a warning).
        """
        self.logger.debug(
            "call_service: namespace=%s domain=%s service=%s data=%s", namespace, domain, service, data,
        )

        # NOTE(review): the RLock is held across the awaits below; harmless for
        # same-thread re-entry but would block other threads — confirm intended.
        with self.services_lock:
            name = data.pop("__name", None)

            if namespace not in self.services:
                self.logger.warning("Unknown namespace (%s) in call_service from %s", namespace, name)
                return None

            if domain not in self.services[namespace]:
                self.logger.warning(
                    "Unknown domain (%s/%s) in call_service from %s", namespace, domain, name,
                )
                return None

            if service not in self.services[namespace][domain]:
                self.logger.warning(
                    "Unknown service (%s/%s/%s) in call_service from %s", namespace, domain, service, name,
                )
                return None

            # If we have namespace in data it's an override for the domain of the eventual service call, as distinct
            # from the namespace the call itself is executed from. e.g. set_state() is in the AppDaemon namespace but
            # needs to operate on a different namespace, e.g. "default"
            if "namespace" in data:
                ns = data["namespace"]
                del data["namespace"]
            else:
                ns = namespace

            funcref = self.services[namespace][domain][service]["callback"]

            # Decide whether or not to call this as async
            # Default to true
            isasync = True

            # if to wait for results, default to False
            return_result = data.pop("return_result", False)

            # if to return results via callback
            callback = data.pop("callback", None)

            if "__async" in self.services[namespace][domain][service]:
                # We have a kwarg to tell us what to do
                if self.services[namespace][domain][service]["__async"] == "auto":
                    # We decide based on introspection
                    if not asyncio.iscoroutinefunction(funcref):
                        isasync = False
                else:
                    # We do what the kwarg tells us
                    isasync = self.services[namespace][domain][service]["__async"]

            if isasync is True:
                # it's a coroutine just await it.
                coro = funcref(ns, domain, service, data)
            else:
                # It's not a coroutine, run it in an executor
                coro = utils.run_in_executor(self, funcref, ns, domain, service, data)

            if return_result is True:
                return await self.run_service(coro)

            elif callback is not None and name is not None:
                # results expected and it must belong to an app
                app_object = await self.AD.app_management.get_app(name)

                # let the app schedule the result delivery itself
                app_object.create_task(self.run_service(coro), callback=callback)

            else:
                # fire and forget
                asyncio.create_task(self.run_service(coro))

    async def run_service(self, coro: Awaitable) -> Any:
        """Await *coro*, logging (never raising) any exception.

        Returns:
            The coroutine's result, or None if it raised.
        """
        try:
            return await coro
        except Exception:
            self.logger.error("-" * 60)
            self.logger.error("Unexpected error in call_service()")
            self.logger.error("-" * 60)
            self.logger.error(traceback.format_exc())
            self.logger.error("-" * 60)
            return None
<|code_end|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.