repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
all-of-us/raw-data-repository | rdr_service/alembic/versions/079772728b59_removing_deviation_requirement.py | 1 | 2308 | """removing deviation requirement
Revision ID: 079772728b59
Revises: a6333953cc2b
Create Date: 2020-05-01 16:09:17.321892
"""
# NOTE(review): the docstring previously read "Revises: 679d13d850ce", which
# disagreed with down_revision below ('a6333953cc2b'). Alembic only reads the
# down_revision variable; the informational docstring has been aligned with it.
from alembic import op
import sqlalchemy as sa
import model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus

# revision identifiers, used by Alembic.
revision = '079772728b59'
down_revision = 'a6333953cc2b'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine, e.g. upgrade_rdr()."""
    target = "upgrade_" + engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine, e.g. downgrade_rdr()."""
    target = "downgrade_" + engine_name
    globals()[target]()
def upgrade_rdr():
    """Drop the NOT NULL requirement from the JSON ``deviations`` columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('biobank_aliquot', 'deviations',
                    existing_type=mysql.JSON(),
                    nullable=True)
    op.alter_column('biobank_specimen', 'deviations',
                    existing_type=mysql.JSON(),
                    nullable=True)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Restore NOT NULL on the JSON ``deviations`` columns (reverse of upgrade).

    NOTE(review): this will fail if any rows have been written with NULL
    deviations since the upgrade ran — expected for a schema rollback.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('biobank_specimen', 'deviations',
                    existing_type=mysql.JSON(),
                    nullable=False)
    op.alter_column('biobank_aliquot', 'deviations',
                    existing_type=mysql.JSON(),
                    nullable=False)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 4258c7b6795d913eee06d1bb5c14f280 | 31.507042 | 125 | 0.713605 | 3.76509 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/038364a84126_add_fields_for_workbench_api.py | 1 | 4057 | """add fields for workbench api
Revision ID: 038364a84126
Revises: 27b812b403cc
Create Date: 2020-01-08 14:06:44.251432
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import WorkbenchResearcherSexAtBirth, WorkbenchResearcherSexualOrientation

# revision identifiers, used by Alembic.
revision = '038364a84126'
down_revision = '27b812b403cc'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine, e.g. upgrade_rdr()."""
    target = "upgrade_" + engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine, e.g. downgrade_rdr()."""
    target = "downgrade_" + engine_name
    globals()[target]()
def upgrade_rdr():
    """Add workbench researcher demographic columns and workspace study-detail columns.

    - ``ethnicity`` is retyped from varchar(80) to smallint(6) (raw SQL, since
      Alembic's alter_column does not cover this MySQL modify cleanly).
    - ``sex_at_birth`` / ``sexual_orientation`` enum columns are added to the
      researcher tables and their history tables.
    - Three free-text study-description columns are added to the workspace
      tables and their history tables.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.execute("ALTER TABLE `workbench_researcher` MODIFY `ethnicity` smallint(6);")
    op.execute("ALTER TABLE `workbench_researcher_history` MODIFY `ethnicity` smallint(6);")
    op.add_column('workbench_researcher', sa.Column('sex_at_birth',
                                                    rdr_service.model.utils.Enum(WorkbenchResearcherSexAtBirth),
                                                    nullable=True))
    op.add_column('workbench_researcher', sa.Column('sexual_orientation',
                                                    rdr_service.model.utils.Enum(WorkbenchResearcherSexualOrientation),
                                                    nullable=True))
    op.add_column('workbench_researcher_history', sa.Column('sex_at_birth',
                                                            rdr_service.model.utils.Enum(WorkbenchResearcherSexAtBirth),
                                                            nullable=True))
    op.add_column('workbench_researcher_history', sa.Column('sexual_orientation',
                                                            rdr_service.model.utils.Enum(
                                                                WorkbenchResearcherSexualOrientation),
                                                            nullable=True))
    op.add_column('workbench_workspace', sa.Column('findings_from_study', sa.String(length=500), nullable=True))
    op.add_column('workbench_workspace', sa.Column('intend_to_study', sa.String(length=500), nullable=True))
    op.add_column('workbench_workspace', sa.Column('reason_for_investigation', sa.String(length=500), nullable=True))
    op.add_column('workbench_workspace_history', sa.Column('findings_from_study', sa.String(length=500), nullable=True))
    op.add_column('workbench_workspace_history', sa.Column('intend_to_study', sa.String(length=500), nullable=True))
    op.add_column('workbench_workspace_history', sa.Column('reason_for_investigation', sa.String(length=500),
                                                           nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the columns added by upgrade_rdr and restore ``ethnicity`` to varchar(80).

    NOTE(review): retyping smallint back to varchar loses nothing structurally,
    but any enum values written while upgraded will be stored as their numeric
    string form after rollback.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('workbench_workspace_history', 'reason_for_investigation')
    op.drop_column('workbench_workspace_history', 'intend_to_study')
    op.drop_column('workbench_workspace_history', 'findings_from_study')
    op.drop_column('workbench_workspace', 'reason_for_investigation')
    op.drop_column('workbench_workspace', 'intend_to_study')
    op.drop_column('workbench_workspace', 'findings_from_study')
    op.drop_column('workbench_researcher_history', 'sexual_orientation')
    op.drop_column('workbench_researcher_history', 'sex_at_birth')
    op.drop_column('workbench_researcher', 'sexual_orientation')
    op.drop_column('workbench_researcher', 'sex_at_birth')
    op.execute("ALTER TABLE `workbench_researcher` MODIFY `ethnicity` varchar(80);")
    op.execute("ALTER TABLE `workbench_researcher_history` MODIFY `ethnicity` varchar(80);")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | aea73577b6f45bb80e6827a9d7c2e2cc | 46.174419 | 120 | 0.627557 | 3.849146 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/relatedperson.py | 1 | 4720 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/RelatedPerson) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class RelatedPerson(domainresource.DomainResource):
    """ An person that is related to a patient, but who is not a direct target of
    care.

    Information about a person that is involved in the care for a patient, but
    who is not the target of healthcare, nor has a formal responsibility in the
    care process.
    """

    # NOTE: auto-generated from the FHIR 3.0.0 StructureDefinition (see file
    # header). Prefer regenerating over hand-editing this class.
    resource_type = "RelatedPerson"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.active = None
        """ Whether this related person's record is in active use.
        Type `bool`. """

        self.address = None
        """ Address where the related person can be contacted or visited.
        List of `Address` items (represented as `dict` in JSON). """

        self.birthDate = None
        """ The date on which the related person was born.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.gender = None
        """ male | female | other | unknown.
        Type `str`. """

        self.identifier = None
        """ A human identifier for this person.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.name = None
        """ A name associated with the person.
        List of `HumanName` items (represented as `dict` in JSON). """

        self.patient = None
        """ The patient this person is related to.
        Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """

        self.period = None
        """ Period of time that this relationship is considered valid.
        Type `Period` (represented as `dict` in JSON). """

        self.photo = None
        """ Image of the person.
        List of `Attachment` items (represented as `dict` in JSON). """

        self.relationship = None
        """ The nature of the relationship.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.telecom = None
        """ A contact detail for the person.
        List of `ContactPoint` items (represented as `dict` in JSON). """

        super(RelatedPerson, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (py name, json name, type, is_list, "of_many" group, required).
        # Referenced modules (address, fhirdate, ...) are resolved by the
        # deferred imports at the bottom of this file.
        js = super(RelatedPerson, self).elementProperties()
        js.extend([
            ("active", "active", bool, False, None, False),
            ("address", "address", address.Address, True, None, False),
            ("birthDate", "birthDate", fhirdate.FHIRDate, False, None, False),
            ("gender", "gender", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("name", "name", humanname.HumanName, True, None, False),
            ("patient", "patient", fhirreference.FHIRReference, False, None, True),
            ("period", "period", period.Period, False, None, False),
            ("photo", "photo", attachment.Attachment, True, None, False),
            ("relationship", "relationship", codeableconcept.CodeableConcept, False, None, False),
            ("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
        ])
        return js
import sys
try:
from . import address
except ImportError:
address = sys.modules[__package__ + '.address']
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import humanname
except ImportError:
humanname = sys.modules[__package__ + '.humanname']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| bsd-3-clause | 6cc6abfc338263cbcf4d777e2afbab79 | 35.589147 | 105 | 0.616525 | 4.267631 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/19c5c975d852_.py | 1 | 2122 | """
Revision ID: 19c5c975d852
Revises: 011b5659ae29
Create Date: 2021-02-26 14:03:21.579534

"""
# Adds an indexed ``file_name`` column to genomic_manifest_file (see upgrade_rdr).
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus

# revision identifiers, used by Alembic.
revision = '19c5c975d852'
down_revision = '011b5659ae29'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine, e.g. upgrade_rdr()."""
    target = "upgrade_" + engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine, e.g. downgrade_rdr()."""
    target = "downgrade_" + engine_name
    globals()[target]()
def upgrade_rdr():
    """Add a nullable, indexed ``file_name`` column to genomic_manifest_file."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('genomic_manifest_file', sa.Column('file_name', sa.String(length=255), nullable=True))
    op.create_index(op.f('ix_genomic_manifest_file_file_name'), 'genomic_manifest_file', ['file_name'], unique=False)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the ``file_name`` index and column (reverse of upgrade_rdr)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_genomic_manifest_file_file_name'), table_name='genomic_manifest_file')
    op.drop_column('genomic_manifest_file', 'file_name')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | d37110e1aa64ae76cd82598c4be629d4 | 33.225806 | 125 | 0.744581 | 3.461664 | false | false | false | false |
django/django-localflavor | tests/test_in.py | 3 | 6889 | from django.test import SimpleTestCase
from localflavor.in_.forms import (INAadhaarNumberField, INPANCardNumberFormField, INStateField, INStateSelect,
INZipCodeField)
class INLocalFlavorTests(SimpleTestCase):
    """Tests for the Indian (``in_``) localflavor form fields and widgets.

    Fix: the ``invalid`` dict in test_INPANCardNumberField contained duplicate
    keys ('60606-' and 'ASSS' each appeared twice); duplicate dict keys are
    silently collapsed by Python, so the extra entries were dead test data and
    have been removed.
    """

    maxDiff = None

    def test_INPStateSelect(self):
        # Render the state <select> widget with Andhra Pradesh pre-selected and
        # compare against the full expected option list.
        # NOTE(review): the widget data here uses 'UT' for Uttarakhand while
        # INStateField below normalizes 'uttarakhand' to 'UA' — this mirrors
        # the library's own data and should be confirmed against localflavor.
        f = INStateSelect()
        out = '''<select name="state">
<option value="KA">Karnataka</option>
<option value="AP" selected="selected">Andhra Pradesh</option>
<option value="KL">Kerala</option>
<option value="TN">Tamil Nadu</option>
<option value="MH">Maharashtra</option>
<option value="UP">Uttar Pradesh</option>
<option value="GA">Goa</option>
<option value="GJ">Gujarat</option>
<option value="RJ">Rajasthan</option>
<option value="HP">Himachal Pradesh</option>
<option value="TG">Telangana</option>
<option value="AR">Arunachal Pradesh</option>
<option value="AS">Assam</option>
<option value="BR">Bihar</option>
<option value="CT">Chhattisgarh</option>
<option value="HR">Haryana</option>
<option value="JH">Jharkhand</option>
<option value="MP">Madhya Pradesh</option>
<option value="MN">Manipur</option>
<option value="ML">Meghalaya</option>
<option value="MZ">Mizoram</option>
<option value="NL">Nagaland</option>
<option value="OR">Odisha</option>
<option value="PB">Punjab</option>
<option value="SK">Sikkim</option>
<option value="TR">Tripura</option>
<option value="UT">Uttarakhand</option>
<option value="WB">West Bengal</option>
<option value="AN">Andaman and Nicobar Islands</option>
<option value="CH">Chandigarh</option>
<option value="DH">Dadra and Nagar Haveli and Daman and Diu</option>
<option value="DL">Delhi</option>
<option value="JK">Jammu and Kashmir</option>
<option value="LD">Lakshadweep</option>
<option value="LA">Ladakh</option>
<option value="PY">Puducherry</option>
</select>'''
        self.assertHTMLEqual(f.render('state', 'AP'), out)

    def test_INZipCodeField(self):
        # Zip codes must be six digits, optionally split "XXX XXX"; the
        # normalized output always drops the internal space.
        error_format = ['Enter a zip code in the format XXXXXX or XXX XXX.']
        valid = {
            '360311': '360311',
            '360 311': '360311',
        }
        invalid = {
            '36 0311': error_format,
            '3603111': error_format,
            '360 31': error_format,
            '36031': error_format,
            'O2B 2R3': error_format
        }
        self.assertFieldOutput(INZipCodeField, valid, invalid)

    def test_INAadhaarNumberField(self):
        # Aadhaar numbers are 12 digits in three groups; hyphens are accepted
        # on input but normalized to spaces. All-zero / leading-zero groups
        # are rejected.
        error_format = ['Enter a valid Aadhaar number in XXXX XXXX XXXX or '
                        'XXXX-XXXX-XXXX format.']
        valid = {
            '3603-1178-8988': '3603 1178 8988',
            '1892 3114 7727': '1892 3114 7727',
        }
        invalid = {
            '9910 182': error_format,
            '3603111': error_format,
            '000 0000 0000': error_format,
            '0000 0000 0000': error_format,
            '18888 8882 8288': error_format
        }
        self.assertFieldOutput(INAadhaarNumberField, valid, invalid)

    def test_INStateField(self):
        # Free-text state names and abbreviations (including common
        # misspellings) normalize to their two-letter codes.
        error_format = ['Enter an Indian state or territory.']
        valid = {
            'an': 'AN',
            'AN': 'AN',
            'andaman and nicobar': 'AN',
            'andra pradesh': 'AP',
            'andrapradesh': 'AP',
            'andhrapradesh': 'AP',
            'ap': 'AP',
            'andhra pradesh': 'AP',
            'ar': 'AR',
            'arunachal pradesh': 'AR',
            'assam': 'AS',
            'as': 'AS',
            'bihar': 'BR',
            'br': 'BR',
            'cg': 'CG',
            'chhattisgarh': 'CG',
            'ch': 'CH',
            'chandigarh': 'CH',
            'daman and diu': 'DD',
            'dd': 'DD',
            'dl': 'DL',
            'delhi': 'DL',
            'dn': 'DN',
            'dadra and nagar haveli': 'DN',
            'ga': 'GA',
            'goa': 'GA',
            'gj': 'GJ',
            'gujarat': 'GJ',
            'himachal pradesh': 'HP',
            'hp': 'HP',
            'hr': 'HR',
            'haryana': 'HR',
            'jharkhand': 'JH',
            'jh': 'JH',
            'jammu and kashmir': 'JK',
            'jk': 'JK',
            'karnataka': 'KA',
            'karnatka': 'KA',
            'ka': 'KA',
            'kerala': 'KL',
            'kl': 'KL',
            'ld': 'LD',
            'lakshadweep': 'LD',
            'maharastra': 'MH',
            'mh': 'MH',
            'maharashtra': 'MH',
            'meghalaya': 'ML',
            'ml': 'ML',
            'mn': 'MN',
            'manipur': 'MN',
            'madhya pradesh': 'MP',
            'mp': 'MP',
            'mizoram': 'MZ',
            'mizo': 'MZ',
            'mz': 'MZ',
            'nl': 'NL',
            'nagaland': 'NL',
            'orissa': 'OR',
            'odisa': 'OR',
            'orisa': 'OR',
            'or': 'OR',
            'pb': 'PB',
            'punjab': 'PB',
            'py': 'PY',
            'pondicherry': 'PY',
            'rajasthan': 'RJ',
            'rajastan': 'RJ',
            'rj': 'RJ',
            'sikkim': 'SK',
            'sk': 'SK',
            'tamil nadu': 'TN',
            'tn': 'TN',
            'tamilnadu': 'TN',
            'tamilnad': 'TN',
            'telangana': 'TG',
            'tg': 'TG',
            'tr': 'TR',
            'tripura': 'TR',
            'ua': 'UA',
            'uttarakhand': 'UA',
            'up': 'UP',
            'uttar pradesh': 'UP',
            'westbengal': 'WB',
            'bengal': 'WB',
            'wb': 'WB',
            'west bengal': 'WB'
        }
        invalid = {
            'florida': error_format,
            'FL': error_format,
        }
        self.assertFieldOutput(INStateField, valid, invalid)

    def test_INPANCardNumberField(self):
        # PAN format: 5 letters (4th = holder type, 5th = name initial),
        # 4 digits, 1 check letter.
        invalid_pan_error = ['Please enter a valid Indian PAN card number.']
        valid = {
            'AAAAA1234T': 'AAAAA1234T',
            'BNZAB2318J': 'BNZAB2318J',
            'ABCFC1234E': 'ABCFC1234E',
            'PQRLY1034T': 'PQRLY1034T',
            'ABHGA1234T': 'ABHGA1234T',
            'UMZJB2318J': 'UMZJB2318J',
            'AOCPC1964E': 'AOCPC1964E',
            'PERTY1934T': 'PERTY1934T',
        }
        invalid = {
            'AAADA1234T': invalid_pan_error,
            'BNZEB2318J': invalid_pan_error,
            'ABCIC1234E': invalid_pan_error,
            'PQRQY1034T': invalid_pan_error,
            'ABHRA1234T': invalid_pan_error,
            'UMZSB2318J': invalid_pan_error,
            '23ZAABN18J': invalid_pan_error,
            '6060-1234-': invalid_pan_error,
            'BNZAA 23184': invalid_pan_error,
            '60606-': invalid_pan_error,
            'ASSS': invalid_pan_error,
            '4000': invalid_pan_error,
            '6060-1234': invalid_pan_error,
        }
        self.assertFieldOutput(INPANCardNumberFormField, valid, invalid)
| bsd-3-clause | 365a3cad26dda50ad2635ec9c15aa122 | 31.961722 | 111 | 0.496298 | 3.132788 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/group.py | 1 | 7440 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Group) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class Group(domainresource.DomainResource):
    """ Group of multiple entities.

    Represents a defined collection of entities that may be discussed or acted
    upon collectively but which are not expected to act collectively and are
    not formally or legally recognized; i.e. a collection of entities that
    isn't an Organization.
    """

    # NOTE: auto-generated from the FHIR 3.0.0 StructureDefinition (see file
    # header). Prefer regenerating over hand-editing this class.
    resource_type = "Group"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.active = None
        """ Whether this group's record is in active use.
        Type `bool`. """

        self.actual = None
        """ Descriptive or actual.
        Type `bool`. """

        self.characteristic = None
        """ Trait of group members.
        List of `GroupCharacteristic` items (represented as `dict` in JSON). """

        self.code = None
        """ Kind of Group members.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.identifier = None
        """ Unique id.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.member = None
        """ Who or what is in group.
        List of `GroupMember` items (represented as `dict` in JSON). """

        self.name = None
        """ Label for Group.
        Type `str`. """

        self.quantity = None
        """ Number of members.
        Type `int`. """

        self.type = None
        """ person | animal | practitioner | device | medication | substance.
        Type `str`. """

        super(Group, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (py name, json name, type, is_list, "of_many" group, required).
        js = super(Group, self).elementProperties()
        js.extend([
            ("active", "active", bool, False, None, False),
            ("actual", "actual", bool, False, None, True),
            ("characteristic", "characteristic", GroupCharacteristic, True, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("member", "member", GroupMember, True, None, False),
            ("name", "name", str, False, None, False),
            ("quantity", "quantity", int, False, None, False),
            ("type", "type", str, False, None, True),
        ])
        return js
from . import backboneelement
class GroupCharacteristic(backboneelement.BackboneElement):
    """ Trait of group members.

    Identifies the traits shared by members of the group.
    """

    # NOTE: auto-generated FHIR backbone element; the value[x] choice is
    # expanded into one attribute per allowed type (the "value" of_many group).
    resource_type = "GroupCharacteristic"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.code = None
        """ Kind of characteristic.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.exclude = None
        """ Group includes or excludes.
        Type `bool`. """

        self.period = None
        """ Period over which characteristic is tested.
        Type `Period` (represented as `dict` in JSON). """

        self.valueBoolean = None
        """ Value held by characteristic.
        Type `bool`. """

        self.valueCodeableConcept = None
        """ Value held by characteristic.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.valueQuantity = None
        """ Value held by characteristic.
        Type `Quantity` (represented as `dict` in JSON). """

        self.valueRange = None
        """ Value held by characteristic.
        Type `Range` (represented as `dict` in JSON). """

        super(GroupCharacteristic, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # "range" here is the sibling FHIR module imported at the bottom of
        # this file (it shadows the builtin by design of the generator).
        js = super(GroupCharacteristic, self).elementProperties()
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, True),
            ("exclude", "exclude", bool, False, None, True),
            ("period", "period", period.Period, False, None, False),
            ("valueBoolean", "valueBoolean", bool, False, "value", True),
            ("valueCodeableConcept", "valueCodeableConcept", codeableconcept.CodeableConcept, False, "value", True),
            ("valueQuantity", "valueQuantity", quantity.Quantity, False, "value", True),
            ("valueRange", "valueRange", range.Range, False, "value", True),
        ])
        return js
class GroupMember(backboneelement.BackboneElement):
    """ Who or what is in group.

    Identifies the resource instances that are members of the group.
    """

    # NOTE: auto-generated FHIR backbone element (see file header).
    resource_type = "GroupMember"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.entity = None
        """ Reference to the group member.
        Type `FHIRReference` referencing `Patient, Practitioner, Device, Medication, Substance` (represented as `dict` in JSON). """

        self.inactive = None
        """ If member is no longer in group.
        Type `bool`. """

        self.period = None
        """ Period member belonged to the group.
        Type `Period` (represented as `dict` in JSON). """

        super(GroupMember, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (py name, json name, type, is_list, "of_many" group, required).
        js = super(GroupMember, self).elementProperties()
        js.extend([
            ("entity", "entity", fhirreference.FHIRReference, False, None, True),
            ("inactive", "inactive", bool, False, None, False),
            ("period", "period", period.Period, False, None, False),
        ])
        return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
| bsd-3-clause | 23f8e245be70824034aa457d43684182 | 34.598086 | 132 | 0.597715 | 4.366197 | false | false | false | false |
all-of-us/raw-data-repository | ci/old_scripts/release_notes.py | 1 | 7516 | #!/usr/bin/env python
"""Generates release notes for a given deployed AppEngine environment based on code at HEAD.
We expect deployed versions to be named with a git tag like v0-1-rc14. This script gets a project's
current version, then gets commit messages from that tag to HEAD, and formats the messages (with
JIRA's style of markup) to make release notes. HEAD is assumed (but not required) to also be tagged
for the release that's going out / the release for which we're generating notes.
This requires the
JIRA_API_USER_PASSWORD and
JIRA_API_USER_NAME
environment variables to be set. If it is also set, the comma-separated list of JIRA user names in
JIRA_WATCHER_NAMES
will be set as watchers on newly created release trackers.
"""
import http.client
import json
import logging
import os
import re
import subprocess
import sys
import urllib.request, urllib.error, urllib.parse
import jira
# GitHub repository used for commit / pull-request links in release notes.
_REPO_BASE_URL = "https://github.com/all-of-us/raw-data-repository"

# Git tags of this format denote releases. Final letter suffixes denote cherry-picks.
# This should match config.yml.
_RELEASE_TAG_RE = re.compile(r"[0-9]+(?:\.[0-9]+)*\.[0-9]+[a-z]*$")
_CHERRY_PICK_RE = re.compile(r"(.*\.[0-9]+)([a-z]*)")

# Formatting for release notes in JIRA comments.
# Note that JIRA auto-linkifies JIRA IDs, so avoid using commit message text in a link.
_LOG_LINE_FORMAT = "--format=* [%aN %ad|" + _REPO_BASE_URL + "/commit/%h] %s"

# Overall release notes template (JIRA wiki markup).
_RELEASE_NOTES_T = """h1. Release Notes for %(current)s
h2. deployed to %(project)s, listing changes since %(prev)s
%(history)s
"""

# JIRA connection settings and the environment variables holding credentials.
_JIRA_INSTANCE_URL = "https://precisionmedicineinitiative.atlassian.net/"
_JIRA_PROJECT_ID = "PD"
_JIRA_NAME_VARNAME = "JIRA_API_USER_NAME"
_JIRA_PASSWORD_VARNAME = "JIRA_API_USER_PASSWORD"
_JIRA_WATCHERS_VARNAME = "JIRA_WATCHER_NAMES"
def _linkify_pull_request_ids(text):
    """Converts all substrings like "(#123)" to links to pull requests."""
    replacement = r"([#\1|{0}/pull/\1])".format(_REPO_BASE_URL)
    return re.sub(r"\(#([0-9]+)\)", replacement, text)
def _get_deployed_version(project_id):
    """Queries the app's version API (root path) for the currently serving version ID.

    This assumes app versions are of the form "<git tag>.<extra AppEngine identifier>", for example
    "v0-1-rc14.399070594381287524".

    Args:
      project_id: AppEngine project ID, for example all-of-us-rdr-staging. The appspot URL is
          constructed from this ID.

    Returns:
      A version ID / git tag of the currently serving code in the given environment.

    Raises:
      RuntimeError: if the version endpoint returns a non-200 status.
    """
    version_url = "https://%s.appspot.com/" % project_id
    response = urllib.request.urlopen(version_url)
    if response.getcode() != http.client.OK:
        raise RuntimeError("HTTP %d for %r" % (response.getcode(), version_url))
    # The endpoint returns JSON; only the tag portion (before the first '.')
    # of version_id is meaningful here.
    app_version = json.loads(response.read())["version_id"]
    return app_version.split(".")[0]
def _get_release_notes_since_tag(deployed_tag, project_id, current_tag):
    """Formats release notes for JIRA from commit messages, from the given tag to HEAD.

    Runs ``git log <deployed_tag>..`` with a JIRA-markup format string and
    substitutes the result into the overall release-notes template.

    Raises:
      RuntimeError: if the git subprocess exits non-zero.
    """
    process = subprocess.Popen(["git", "log", deployed_tag + "..", _LOG_LINE_FORMAT], stdout=subprocess.PIPE)
    if process.wait() != 0:
        raise RuntimeError("Getting commit messages failed.")
    stdout, _ = process.communicate()
    # Popen pipes yield bytes; decode so downstream text processing works.
    commit_messages = stdout.decode("utf8")  # Keep further text processing from choking on non-ASCII.
    return _RELEASE_NOTES_T % {
        "current": current_tag,
        "project": project_id,
        "prev": deployed_tag,
        "history": _linkify_pull_request_ids(commit_messages),
    }
def _find_current_commit_tag():
    """Returns the git tag (or tag + short commit hash) describing the current commit.

    Fix: subprocess pipes yield ``bytes`` under Python 3; the previous version
    returned the raw bytes object, which rendered as "b'v0-1-rc14'" when
    interpolated into release notes and the JIRA issue summary. Decode before
    stripping, matching _get_release_notes_since_tag().

    Raises:
      RuntimeError: if ``git describe`` exits non-zero.
    """
    process = subprocess.Popen(["git", "describe", "--tags"], stdout=subprocess.PIPE)
    if process.wait() != 0:
        raise RuntimeError("Getting current tag.")
    stdout, _ = process.communicate()
    return stdout.decode("utf8").strip()
def _connect_to_jira():
    """Opens a JIRA API connection based on username/pw from env vars.

    Raises:
      RuntimeError: if either credential environment variable is unset.
    """
    # Validate both variables up front so the error names the missing one.
    for varname in (_JIRA_PASSWORD_VARNAME, _JIRA_NAME_VARNAME):
        if varname not in os.environ:
            raise RuntimeError("No environment variable value for %r." % varname)
    return jira.JIRA(_JIRA_INSTANCE_URL, basic_auth=(os.getenv(_JIRA_NAME_VARNAME), os.getenv(_JIRA_PASSWORD_VARNAME)))
def _strip_cherry_pick(version_id):
    """Returns a tuple of (version ID without cherry-pick suffix, boolean is_cherry_pick)."""
    match = _CHERRY_PICK_RE.search(version_id)
    if match:
        base, suffix = match.groups()
        # A non-empty trailing letter suffix marks a cherry-pick release.
        return base, bool(suffix)
    # Unrecognized format: pass the ID through untouched, not a cherry-pick.
    return version_id, False
def _get_watchers():
    """Returns the set of non-empty JIRA usernames from the watchers env var."""
    raw = os.getenv(_JIRA_WATCHERS_VARNAME, "")
    return {name.strip() for name in raw.split(",") if name.strip()}
def _update_or_create_release_tracker(jira_connection, full_version_id, release_notes):
    """Adds release notes to a new or existing JIRA issue.

    Searches for an existing "Release tracker for <version>" issue (cherry-pick
    suffix stripped, so all cherry-picks of a release share one tracker). If
    found, appends the notes as a comment; otherwise creates a new Task and
    adds the configured watchers.
    """
    version_id, is_cherry_pick = _strip_cherry_pick(full_version_id)
    summary = "Release tracker for %s" % version_id
    issues = jira_connection.search_issues(
        'project = "%s" AND summary ~ "%s" ORDER BY created DESC' % (_JIRA_PROJECT_ID, summary)
    )
    if issues:
        if len(issues) > 1:
            # Shouldn't normally happen; prefer the most recently created one.
            logging.warning(
                "Found multiple release tracker matches, using newest. %s",
                ", ".join("[%s] %s" % (issue.key, issue.fields().summary) for issue in issues),
            )
        issue = issues[0]
        jira_connection.add_comment(issue, release_notes)
        what_happened = "Updated"
    else:
        if is_cherry_pick:
            # A cherry-pick implies an earlier tracker for the base version.
            logging.warning(
                "Expected %r to exist since %s looks like a cherry-pick. Creating a new issue instead.",
                summary,
                full_version_id,
            )
        issue = jira_connection.create_issue(
            project=_JIRA_PROJECT_ID, summary=summary, description=release_notes, issuetype={"name": "Task"}
        )
        for watcher_username in _get_watchers():
            try:
                jira_connection.add_watcher(issue, watcher_username)
            except jira.exceptions.JIRAError as e:
                # Bad watcher names shouldn't abort the release-notes run.
                logging.warning("Skipping invalid watcher %r (got %s).", watcher_username, e.status_code)
        what_happened = "Created"
    logging.info("%s [%s] with release notes for %s.", what_happened, issue.key, full_version_id)
def main():
    """Looks up version tags, gets commit logs, and publishes release in JIRA.

    Usage: release_notes.py <appengine_project_id>. Non-release-looking tags
    only produce warnings so ad-hoc deploys can still get notes.
    """
    logging.getLogger().setLevel(logging.INFO)
    if len(sys.argv) != 2:
        logging.critical("Usage: %s appengine_project_id", sys.argv[0])
        sys.exit(1)
    project_id = sys.argv[1]
    deployed_version = _get_deployed_version(project_id)
    if not _RELEASE_TAG_RE.match(deployed_version):
        logging.warning("Tag %r from %r does not look like a release tag.", deployed_version, project_id)
    current_version = _find_current_commit_tag()
    if not _RELEASE_TAG_RE.match(current_version):
        logging.warning("Current tag %r does not look like a release tag.", current_version)
    jira_connection = _connect_to_jira()
    release_notes = _get_release_notes_since_tag(deployed_version, project_id, current_version)
    logging.info(release_notes)
    _update_or_create_release_tracker(jira_connection, current_version, release_notes)


if __name__ == "__main__":
    main()
| bsd-3-clause | 1b50adf29496757c4a45e352f2663498 | 39.627027 | 119 | 0.664715 | 3.468389 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/genomic/validation.py | 1 | 3509 | import collections
import datetime
import functools
import operator
from rdr_service import clock
from rdr_service.dao.genomics_dao import GenomicSetDao
from rdr_service.participant_enums import SampleStatus, WithdrawalStatus
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicValidationFlag
# Validation thresholds for genomic set membership (used by
# _iter_validation_flags / validate_and_update_genomic_set_by_id).
GENOMIC_VALID_SEX_AT_BIRTH_VALUES = ["F", "M"]
# Minimum participant age in years.
GENOMIC_VALID_AGE = 18
# Consents before this date are not valid for genomics.
GENOMIC_VALID_CONSENT_CUTOFF = datetime.datetime(2018, 4, 24)
GENOMIC_VALID_SAMPLE_STATUSES = [SampleStatus.RECEIVED]
def validate_and_update_genomic_set_by_id(genomic_set_id, dao=None):
    """
    Determine and write validation statuses and times for the specified GenomicSet and all of its
    GenomicSetMembers in a single transaction.

    The whole set is marked INVALID if any member fails validation; otherwise VALID.
    On any error the session is rolled back and the exception re-raised.

    :param genomic_set_id: The id of the GenomicSet to validate
    :param dao: (optional) reused GenomicSetDao; a fresh one is created when omitted
    :type dao: GenomicSetDao or None
    """
    now = clock.CLOCK.now()
    # Anyone born on or before this date is at least GENOMIC_VALID_AGE years old today.
    date_of_birth_cutoff = datetime.date(year=now.year - GENOMIC_VALID_AGE, month=now.month, day=now.day)
    dao = dao or GenomicSetDao()
    update_queue = collections.deque()
    with dao.member_dao.session() as session:
        try:
            for row in dao.iter_validation_data_for_genomic_set_id_with_session(session, genomic_set_id):
                flags = list(_iter_validation_flags(row, date_of_birth_cutoff))
                # Any flag at all makes the member invalid.
                status = GenomicSetMemberStatus.INVALID if flags else GenomicSetMemberStatus.VALID
                update_queue.append(dao.member_dao.BulkUpdateValidationParams(row.id, status, flags))
            dao.member_dao.bulk_update_validation_status_with_session(session, update_queue)
            genomic_set = dao.get_with_session(session, genomic_set_id)
            # Generator (not a materialized list) short-circuits on the first invalid member.
            if any(params.status == GenomicSetMemberStatus.INVALID for params in update_queue):
                genomic_set.genomicSetStatus = GenomicSetStatus.INVALID
            else:
                genomic_set.genomicSetStatus = GenomicSetStatus.VALID
            genomic_set.validatedTime = now
            dao.update_with_session(session, genomic_set)
        except Exception:
            session.rollback()
            raise
def _iter_validation_flags(row, date_of_birth_cutoff):
    """
    Iterate all GenomicValidationFlag that apply to the given row.

    :param row: a Row from GenomicSet.iter_validation_data_for_genomic_set_id_with_session().
        Rows contain all fields of GenomicSetMember along with any calculated fields necessary
        for this validation.
    :param date_of_birth_cutoff: any birth date after dob_cutoff will be invalid (too young)
    """
    if not row.consent_time or row.consent_time < GENOMIC_VALID_CONSENT_CUTOFF:
        yield GenomicValidationFlag.INVALID_CONSENT
    if row.withdrawal_status != WithdrawalStatus.NOT_WITHDRAWN:
        yield GenomicValidationFlag.INVALID_WITHDRAW_STATUS
    if row.sex_at_birth not in GENOMIC_VALID_SEX_AT_BIRTH_VALUES:
        yield GenomicValidationFlag.INVALID_SEX_AT_BIRTH
    if not row.birth_date or row.birth_date > date_of_birth_cutoff:
        yield GenomicValidationFlag.INVALID_AGE
    # The DNA-isolation sample must be valid AND at least one of the 1ED04/1SAL2
    # samples must be valid; otherwise flag the biobank order.
    # (Replaces an opaque functools.partial/operator.contains/map construction
    # with an equivalent, readable generator expression.)
    if not (
        row.samples_to_isolate_dna in GENOMIC_VALID_SAMPLE_STATUSES
        and any(
            sample_status in GENOMIC_VALID_SAMPLE_STATUSES
            for sample_status in (row.sample_status_1ED04, row.sample_status_1SAL2)
        )
    ):
        yield GenomicValidationFlag.INVALID_BIOBANK_ORDER
    if not row.zip_code:
        yield GenomicValidationFlag.INVALID_NY_ZIPCODE
| bsd-3-clause | 0c8f61a0f944a0c917686b1ed0e92833 | 42.320988 | 107 | 0.701339 | 3.45374 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/rdr_client/pairing_assigner.py | 1 | 5499 | """Assigns participants with the specified IDs to the organization.
Usage:
./rdr_client/run_client.sh --project all-of-us-rdr-prod --account $USER@pmi-ops.org \
pairing_assigner.py file.csv --pairing [site|organization|awardee] \
[--dry_run] [--override_site]
Where site = google_group, organization = external_id, awardee = name.
The CSV contains lines with P12345678,NEW_ORGANIZATION (no headers necessary). e.g.:
Example awardees:
P11111111,AZ_TUCSON
P22222222,AZ_TUCSON
P99999999,PITT
P00000000,PITT
Example sites:
P11111111,hpo-site-monroeville
P22222222,hpo-site-phoenix
P99999999,hpo-site-tucson
P00000000,hpo-site-pitt
"""
import csv
import logging
import sys
from rdr_service.main_util import configure_logging, get_parser
from rdr_service.rdr_client.client import Client, HttpException, client_log
def main(client):
    """Re-pair participants listed in the input CSV.

    Reads ``client.args.file`` as CSV rows of (participant_id, new_pairing),
    fetches each participant over the API, and PUTs the updated pairing back
    unless --dry_run is set. Counts of updates, no-ops, and errors are logged
    at the end.
    """
    num_no_change = 0
    num_updates = 0
    num_errors = 0
    pairing_list = ["site", "organization", "awardee"]
    pairing_key = client.args.pairing
    if client.args.pairing not in pairing_list:
        sys.exit("Pairing must be one of site|organization|awardee")
    with open(client.args.file) as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            # Each row must be exactly two values: participant ID and new pairing.
            try:
                participant_id, new_pairing = [v.strip() for v in line]
            except ValueError as e:
                logging.error("Skipping invalid line %d (parsed as %r): %s.", reader.line_num, line, e)
                num_errors += 1
                continue
            if not (new_pairing and participant_id):
                logging.warning(
                    "Skipping invalid line %d: missing new_pairing (%r) or participant (%r).",
                    reader.line_num,
                    new_pairing,
                    participant_id,
                )
                num_errors += 1
                continue
            if not participant_id.startswith("P"):
                logging.error(
                    "Malformed participant ID from line %d: %r does not start with P.", reader.line_num, participant_id
                )
                num_errors += 1
                continue
            # Fetch the current participant record; skip (don't abort) on API errors.
            try:
                participant = client.request_json("Participant/%s" % participant_id)
            except HttpException as e:
                logging.error("Skipping %s: %s", participant_id, e)
                num_errors += 1
                continue
            old_pairing = _get_old_pairing(participant, pairing_key)
            if new_pairing == old_pairing:
                num_no_change += 1
                logging.info("%s unchanged (already %s)", participant_id, old_pairing)
                continue
            # Without --override_site, never touch a participant already paired to a site.
            if not client.args.override_site:
                if participant.get("site") and participant["site"] != "UNSET":
                    logging.info(
                        "Skipping participant %s already paired with site %s" % (participant_id, participant["site"])
                    )
                    continue
            # With --no_awardee_change, only allow re-pairing within the same awardee
            # (new pairing name must begin with the current awardee name).
            if client.args.no_awardee_change:
                if participant.get("awardee") and participant["awardee"] != "UNSET":
                    if not new_pairing.startswith(participant["awardee"]):
                        logging.info(
                            "Skipping participant %s where pairing %s does not begin with old awardee %s"
                            % (participant_id, new_pairing, participant["awardee"])
                        )
                        continue
            logging.info("%s %s => %s", participant_id, old_pairing, new_pairing)
            if new_pairing == "UNSET":
                # Unpairing: clear all three pairing levels and the provider links.
                for i in pairing_list:
                    participant[i] = "UNSET"
                participant["providerLink"] = []
            else:
                # Re-pairing: drop all pairing keys, then set only the requested level;
                # the server derives the other levels from it.
                for i in pairing_list:
                    del participant[i]
                participant[pairing_key] = new_pairing
            if client.args.dry_run:
                logging.info("Dry run, would update participant[%r] to %r.", pairing_key, new_pairing)
            else:
                client.request_json(
                    "Participant/%s" % participant_id, "PUT", participant, headers={"If-Match": client.last_etag}
                )
            # Counted for dry runs too, so the summary reports what *would* change.
            num_updates += 1
    logging.info(
        "%s %d participants, %d unchanged, %d errors.",
        "Would update" if client.args.dry_run else "Updated",
        num_updates,
        num_no_change,
        num_errors,
    )
def _get_old_pairing(participant, pairing_key):
old_pairing = participant[pairing_key]
if not old_pairing:
return "UNSET"
return old_pairing
# Script entry point: parse arguments and run the pairing assigner.
if __name__ == "__main__":
    configure_logging()
    client_log.setLevel(logging.WARN)  # Suppress the log of HTTP requests.
    arg_parser = get_parser()
    arg_parser.add_argument("file", help="The name of file containing the list of HPOs and participant IDs")
    arg_parser.add_argument("--dry_run", action="store_true")
    # Fixed: the two adjacent string literals previously concatenated without a
    # space, rendering the help text as "...one of[site|organization|awardee]".
    arg_parser.add_argument(
        "--pairing", help="set level of pairing as one of [site|organization|awardee]", required=True
    )
    arg_parser.add_argument(
        "--override_site", help="Update pairings on participants that have a site pairing already", action="store_true"
    )
    arg_parser.add_argument(
        "--no_awardee_change",
        help="Do not re-pair participants if the awardee is changing; just log that it happened",
        action="store_true",
    )
    main(Client(parser=arg_parser))
| bsd-3-clause | 14449a8216b1d1a6a67b7b3ecda1aa52 | 36.924138 | 119 | 0.571377 | 3.872535 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/ce0d4837ba00_add_deleted_flag_for_genomic_metrics.py | 1 | 4812 | """add deleted flag for genomic metrics
Revision ID: ce0d4837ba00
Revises: a41c2f2266cb
Create Date: 2021-02-17 13:31:28.006077
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'ce0d4837ba00'
down_revision = 'a41c2f2266cb'  # this migration applies on top of a41c2f2266cb
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, e.g. upgrade_rdr()."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. downgrade_rdr()."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add one SmallInteger 'deleted' flag per genomic data file to genomic_gc_validation_metrics."""
    deletion_flag_columns = (
        'crai_deleted',
        'cram_deleted',
        'cram_md5_deleted',
        'hf_vcf_deleted',
        'hf_vcf_md5_deleted',
        'hf_vcf_tbi_deleted',
        'idat_green_deleted',
        'idat_green_md5_deleted',
        'idat_red_deleted',
        'idat_red_md5_deleted',
        'raw_vcf_deleted',
        'raw_vcf_md5_deleted',
        'raw_vcf_tbi_deleted',
        'vcf_deleted',
        'vcf_md5_deleted',
        'vcf_tbi_deleted',
    )
    # Same columns, same order as the auto-generated Alembic statements.
    for column_name in deletion_flag_columns:
        op.add_column('genomic_gc_validation_metrics', sa.Column(column_name, sa.SmallInteger(), nullable=False))
def downgrade_rdr():
    """Drop the per-file 'deleted' flag columns added by upgrade_rdr."""
    deletion_flag_columns = (
        'crai_deleted',
        'cram_deleted',
        'cram_md5_deleted',
        'hf_vcf_deleted',
        'hf_vcf_md5_deleted',
        'hf_vcf_tbi_deleted',
        'idat_green_deleted',
        'idat_green_md5_deleted',
        'idat_red_deleted',
        'idat_red_md5_deleted',
        'raw_vcf_deleted',
        'raw_vcf_md5_deleted',
        'raw_vcf_tbi_deleted',
        'vcf_deleted',
        'vcf_md5_deleted',
        'vcf_tbi_deleted',
    )
    # Drop in reverse of the add order, matching the auto-generated statements.
    for column_name in reversed(deletion_flag_columns):
        op.drop_column('genomic_gc_validation_metrics', column_name)
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 2e7c49392fa57fd383c4a2a7a1fe7466 | 52.466667 | 125 | 0.738362 | 3.220884 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/37bac919059e_removing_stored_sample_tmp_table.py | 1 | 3832 | """removing stored sample tmp table
Revision ID: 37bac919059e
Revises: 061dc1adde35
Create Date: 2021-03-10 16:13:28.497174
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '37bac919059e'
down_revision = '061dc1adde35'  # this migration applies on top of 061dc1adde35
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, e.g. upgrade_rdr()."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. downgrade_rdr()."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Drop the temporary biobank stored-sample status import table and its indexes."""
    table_name = 'biobank_stored_sample_status_import_tmp'
    # Indexes must go before the table itself; same order as the generated statements.
    for indexed_column in ('sample_id', 'external_participant_id'):
        op.drop_index(op.f('ix_%s_%s' % (table_name, indexed_column)), table_name=table_name)
    op.drop_table(table_name)
def downgrade_rdr():
    # Recreate the temporary import table dropped by upgrade_rdr, with its
    # original columns and the two secondary indexes.
    op.create_table('biobank_stored_sample_status_import_tmp',
        sa.Column('sample_family_id', sa.String(length=80), nullable=True),
        sa.Column('sample_id', sa.String(length=80), nullable=True),
        sa.Column('sample_storage_status', sa.String(length=80), nullable=True),
        sa.Column('sample_type', sa.String(length=80), nullable=True),
        sa.Column('parent_expected_volume', sa.String(length=80), nullable=True),
        sa.Column('sample_quantity', sa.String(length=80), nullable=True),
        sa.Column('sample_container_type', sa.String(length=80), nullable=True),
        sa.Column('sample_family_collection_date', rdr_service.model.utils.UTCDateTime(), nullable=True),
        sa.Column('sample_disposal_status', sa.String(length=80), nullable=True),
        sa.Column('sample_disposed_date', rdr_service.model.utils.UTCDateTime(), nullable=True),
        sa.Column('parent_sample_id', sa.String(length=80), nullable=True),
        sa.Column('sample_confirmed_date', rdr_service.model.utils.UTCDateTime(), nullable=True),
        sa.Column('external_participant_id', sa.String(length=80), nullable=True),
        sa.Column('test_code', sa.String(length=80), nullable=True),
        sa.Column('sample_treatment', sa.String(length=80), nullable=True),
        sa.Column('sample_family_create_date', rdr_service.model.utils.UTCDateTime(), nullable=True),
        sa.Column('sent_order_id', sa.String(length=80), nullable=True),
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_biobank_stored_sample_status_import_tmp_external_participant_id'), 'biobank_stored_sample_status_import_tmp', ['external_participant_id'], unique=False)
    op.create_index(op.f('ix_biobank_stored_sample_status_import_tmp_sample_id'), 'biobank_stored_sample_status_import_tmp', ['sample_id'], unique=False)
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 88388553409cfdc51e2eeea30a999151 | 46.9 | 181 | 0.731211 | 3.430618 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/model/onsite_id_verification.py | 1 | 2071 | from sqlalchemy import Column, Integer, String, JSON, ForeignKey, event
from sqlalchemy.ext.declarative import declared_attr
from rdr_service.model.utils import Enum, UTCDateTime6
from rdr_service.model.base import Base, model_insert_listener, model_update_listener
from rdr_service.participant_enums import OnSiteVerificationType, OnSiteVerificationVisitType
class OnsiteIdVerification(Base):
    """Participant onsite ID verification histories (one row per verification event)."""

    __tablename__ = "onsite_id_verification"

    id = Column("id", Integer, primary_key=True, autoincrement=True, nullable=False)
    # created/modified are populated by the before_insert/before_update
    # listeners registered at module level.
    created = Column("created", UTCDateTime6, nullable=True)
    """The create time for this record."""
    modified = Column("modified", UTCDateTime6, nullable=True)
    """The last modified time for this record."""
    participantId = Column("participant_id", Integer, ForeignKey("participant.participant_id"), nullable=False)
    """
    Participant id for the on site participant
    """
    userEmail = Column("user_email", String(200))
    """
    Email address for the on site participant
    """

    @declared_attr
    def siteId(cls):
        """The site id for the on site verification event"""
        return Column("site_id", Integer, ForeignKey("site.site_id"))

    verifiedTime = Column("verified_time", UTCDateTime6, nullable=False)
    """
    On site verification event time
    """
    verificationType = Column("verification_type", Enum(OnSiteVerificationType), nullable=False)
    """
    Indicates the on site verification types
    :ref:`Enumerated values <verification_type>`
    """
    visitType = Column("visit_type", Enum(OnSiteVerificationVisitType), nullable=False)
    """
    Indicates the on site verification visit types
    :ref:`Enumerated values <visit_type>`
    """
    resource = Column("resource", JSON)
    """Original resource value; whole payload request that was sent from the requester"""
# Automatically stamp the created/modified timestamps on insert and update.
event.listen(OnsiteIdVerification, "before_insert", model_insert_listener)
event.listen(OnsiteIdVerification, "before_update", model_update_listener)
| bsd-3-clause | 2d73ce51883b100c94e431dfee8afec3 | 39.607843 | 111 | 0.720425 | 4.235174 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/model/resource_schema.py | 1 | 1289 |
from sqlalchemy import event, Column, ForeignKey, UniqueConstraint, BigInteger, String, Index
from sqlalchemy.dialects.mysql import JSON
from rdr_service.model.base import Base, model_insert_listener, model_update_listener
from rdr_service.model.utils import UTCDateTime6
class ResourceSchema(Base):
    """
    Resource Schema: stores a versioned JSON schema document per resource type.
    """

    __tablename__ = "resource_schema"

    # Primary Key
    id = Column("id", BigInteger, primary_key=True, autoincrement=True, nullable=False)
    # have mysql set the creation data for each new order
    created = Column("created", UTCDateTime6, nullable=True)
    # have mysql always update the modified data when the record is changed
    modified = Column("modified", UTCDateTime6, nullable=True)
    # The resource type this schema belongs to.
    resourceTypeID = Column("resource_type_id", ForeignKey("resource_type.id"), nullable=False)
    # The schema document itself.
    schema = Column("schema", JSON, nullable=False)
    # Hash of the schema contents, used to detect schema changes.
    schemaHash = Column("schema_hash", String(64), default='', nullable=False)

    # At most one schema version per resource type per modification timestamp.
    __table_args__ = (
        UniqueConstraint("resource_type_id", "modified"),
    )
# Secondary index for looking up a schema by (resource type, schema hash).
Index("ix_res_type_schema_hash", ResourceSchema.resourceTypeID, ResourceSchema.schemaHash)

# Automatically stamp the created/modified timestamps on insert and update.
event.listen(ResourceSchema, "before_insert", model_insert_listener)
event.listen(ResourceSchema, "before_update", model_update_listener)
| bsd-3-clause | 131333a0fa3627011f48600c0c4def0f | 39.28125 | 95 | 0.734678 | 3.966154 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/questionnaireresponse.py | 1 | 10757 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/QuestionnaireResponse) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class QuestionnaireResponse(domainresource.DomainResource):
    """ A structured set of questions and their answers.

    A structured set of questions and their answers. The questions are ordered
    and grouped into coherent subsets, corresponding to the structure of the
    grouping of the questionnaire being responded to.

    NOTE: generated fhirclient model (FHIR 3.0.0) - edit with care.
    """

    resource_type = "QuestionnaireResponse"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.author = None
        """ Person who received and recorded the answers.
        Type `FHIRReference` referencing `Device, Practitioner, Patient, RelatedPerson` (represented as `dict` in JSON). """

        self.authored = None
        """ Date the answers were gathered.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.basedOn = None
        """ Request fulfilled by this QuestionnaireResponse.
        List of `FHIRReference` items referencing `ReferralRequest, CarePlan, ProcedureRequest` (represented as `dict` in JSON). """

        self.context = None
        """ Encounter or Episode during which questionnaire was completed.
        Type `FHIRReference` referencing `Encounter, EpisodeOfCare` (represented as `dict` in JSON). """

        self.identifier = None
        """ Unique id for this set of answers.
        Type `Identifier` (represented as `dict` in JSON). """

        self.item = None
        """ Groups and questions.
        List of `QuestionnaireResponseItem` items (represented as `dict` in JSON). """

        self.parent = None
        """ Part of this action.
        List of `FHIRReference` items referencing `Observation, Procedure` (represented as `dict` in JSON). """

        self.questionnaire = None
        """ Form being answered.
        Type `FHIRReference` referencing `Questionnaire` (represented as `dict` in JSON). """

        self.source = None
        """ The person who answered the questions.
        Type `FHIRReference` referencing `Patient, Practitioner, RelatedPerson` (represented as `dict` in JSON). """

        self.status = None
        """ in-progress | completed | amended | entered-in-error | stopped.
        Type `str`. """

        self.subject = None
        """ The subject of the questions.
        Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """

        super(QuestionnaireResponse, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(QuestionnaireResponse, self).elementProperties()
        # Each tuple (fhirclient convention):
        # (attr name, JSON key, type, is_list, "one of many" group, required)
        js.extend([
            ("author", "author", fhirreference.FHIRReference, False, None, False),
            ("authored", "authored", fhirdate.FHIRDate, False, None, False),
            ("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
            ("context", "context", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("item", "item", QuestionnaireResponseItem, True, None, False),
            ("parent", "parent", fhirreference.FHIRReference, True, None, False),
            ("questionnaire", "questionnaire", fhirreference.FHIRReference, False, None, False),
            ("source", "source", fhirreference.FHIRReference, False, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, False),
        ])
        return js
from . import backboneelement
class QuestionnaireResponseItem(backboneelement.BackboneElement):
    """ Groups and questions.

    A group or question item from the original questionnaire for which answers
    are provided.

    NOTE: generated fhirclient model (FHIR 3.0.0) - edit with care.
    """

    resource_type = "QuestionnaireResponseItem"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.answer = None
        """ The response(s) to the question.
        List of `QuestionnaireResponseItemAnswer` items (represented as `dict` in JSON). """

        self.definition = None
        """ ElementDefinition - details for the item.
        Type `str`. """

        self.item = None
        """ Nested questionnaire response items.
        List of `QuestionnaireResponseItem` items (represented as `dict` in JSON). """

        self.linkId = None
        """ Pointer to specific item from Questionnaire.
        Type `str`. """

        self.subject = None
        """ The subject this group's answers are about.
        Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """

        self.text = None
        """ Name for group or question text.
        Type `str`. """

        super(QuestionnaireResponseItem, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(QuestionnaireResponseItem, self).elementProperties()
        # Each tuple (fhirclient convention):
        # (attr name, JSON key, type, is_list, "one of many" group, required)
        js.extend([
            ("answer", "answer", QuestionnaireResponseItemAnswer, True, None, False),
            ("definition", "definition", str, False, None, False),
            ("item", "item", QuestionnaireResponseItem, True, None, False),
            ("linkId", "linkId", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, False),
            ("text", "text", str, False, None, False),
        ])
        return js
class QuestionnaireResponseItemAnswer(backboneelement.BackboneElement):
    """ The response(s) to the question.

    The respondent's answer(s) to the question. The value* properties are a
    FHIR "choice of types": at most one is populated per answer.

    NOTE: generated fhirclient model (FHIR 3.0.0) - edit with care.
    """

    resource_type = "QuestionnaireResponseItemAnswer"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.item = None
        """ Nested groups and questions.
        List of `QuestionnaireResponseItem` items (represented as `dict` in JSON). """

        self.valueAttachment = None
        """ Single-valued answer to the question.
        Type `Attachment` (represented as `dict` in JSON). """

        self.valueBoolean = None
        """ Single-valued answer to the question.
        Type `bool`. """

        self.valueCoding = None
        """ Single-valued answer to the question.
        Type `Coding` (represented as `dict` in JSON). """

        self.valueDate = None
        """ Single-valued answer to the question.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.valueDateTime = None
        """ Single-valued answer to the question.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.valueDecimal = None
        """ Single-valued answer to the question.
        Type `float`. """

        self.valueInteger = None
        """ Single-valued answer to the question.
        Type `int`. """

        self.valueQuantity = None
        """ Single-valued answer to the question.
        Type `Quantity` (represented as `dict` in JSON). """

        self.valueReference = None
        """ Single-valued answer to the question.
        Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """

        self.valueString = None
        """ Single-valued answer to the question.
        Type `str`. """

        self.valueTime = None
        """ Single-valued answer to the question.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.valueUri = None
        """ Single-valued answer to the question.
        Type `str`. """

        super(QuestionnaireResponseItemAnswer, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(QuestionnaireResponseItemAnswer, self).elementProperties()
        # Each tuple (fhirclient convention):
        # (attr name, JSON key, type, is_list, "one of many" group, required);
        # the "value" group marks the mutually exclusive value[x] choice.
        js.extend([
            ("item", "item", QuestionnaireResponseItem, True, None, False),
            ("valueAttachment", "valueAttachment", attachment.Attachment, False, "value", False),
            ("valueBoolean", "valueBoolean", bool, False, "value", False),
            ("valueCoding", "valueCoding", coding.Coding, False, "value", False),
            ("valueDate", "valueDate", fhirdate.FHIRDate, False, "value", False),
            ("valueDateTime", "valueDateTime", fhirdate.FHIRDate, False, "value", False),
            ("valueDecimal", "valueDecimal", float, False, "value", False),
            ("valueInteger", "valueInteger", int, False, "value", False),
            ("valueQuantity", "valueQuantity", quantity.Quantity, False, "value", False),
            ("valueReference", "valueReference", fhirreference.FHIRReference, False, "value", False),
            ("valueString", "valueString", str, False, "value", False),
            ("valueTime", "valueTime", fhirdate.FHIRDate, False, "value", False),
            ("valueUri", "valueUri", str, False, "value", False),
        ])
        return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| bsd-3-clause | 456d95f8636d93f2cb4284e2fcba3037 | 39.746212 | 132 | 0.615041 | 4.369212 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/api/biobank_order_api.py | 1 | 1705 | from flask import request
from rdr_service.api.base_api import UpdatableApi
from rdr_service.api_util import PTC_AND_HEALTHPRO
from rdr_service.app_util import auth_required
from rdr_service.dao.biobank_order_dao import BiobankOrderDao
class BiobankOrderApi(UpdatableApi):
    """REST resource for creating, reading, and updating a participant's biobank orders."""

    def __init__(self):
        super(BiobankOrderApi, self).__init__(BiobankOrderDao(), get_returns_children=True)

    @auth_required(PTC_AND_HEALTHPRO)
    def post(self, p_id):
        """Create a biobank order for the given participant."""
        return super(BiobankOrderApi, self).post(participant_id=p_id)

    @auth_required(PTC_AND_HEALTHPRO)
    def get(self, p_id=None, bo_id=None):  # pylint: disable=unused-argument
        """Fetch a biobank order (or list them) for the given participant."""
        return super(BiobankOrderApi, self).get(id_=bo_id, participant_id=p_id)

    @auth_required(PTC_AND_HEALTHPRO)
    def put(self, p_id, bo_id):  # pylint: disable=unused-argument
        """Replace a biobank order for the given participant."""
        return super(BiobankOrderApi, self).put(bo_id, participant_id=p_id)

    @auth_required(PTC_AND_HEALTHPRO)
    def patch(self, p_id, bo_id):  # pylint: disable=unused-argument
        """Partially update a biobank order."""
        return super(BiobankOrderApi, self).patch(bo_id)

    # Maps handle_list_queries() keyword names to their request query-string keys.
    _LIST_QUERY_PARAMS = (
        ('kit_id', 'kitId'),
        ('state', 'state'),
        ('city', 'city'),
        ('zip_code', 'zipCode'),
        ('start_date', 'startDate'),
        ('end_date', 'endDate'),
        ('origin', 'origin'),
        ('page', 'page'),
        ('page_size', 'pageSize'),
    )

    def list(self, participant_id):
        """List biobank orders for a participant, filtered by the request's query params."""
        kwargs = {'participant_id': participant_id}
        kwargs.update((name, request.args.get(arg)) for name, arg in self._LIST_QUERY_PARAMS)
        return BiobankOrderDao().handle_list_queries(**kwargs)
| bsd-3-clause | 1ce3627d294df24c5193b9ccb1f14e7b | 36.065217 | 91 | 0.646921 | 3.18097 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/eligibilityresponse.py | 1 | 3429 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/EligibilityResponse) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class EligibilityResponse(domainresource.DomainResource):
    """ EligibilityResponse resource.

    This resource provides eligibility and plan details from the processing of
    an Eligibility resource.

    NOTE: generated fhirclient model (FHIR 1.0.2) - edit with care.
    """

    resource_name = "EligibilityResponse"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.created = None
        """ Creation date.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.disposition = None
        """ Disposition Message.
        Type `str`. """

        self.identifier = None
        """ Business Identifier.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.organization = None
        """ Insurer.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """

        self.originalRuleset = None
        """ Original version.
        Type `Coding` (represented as `dict` in JSON). """

        self.outcome = None
        """ complete | error.
        Type `str`. """

        self.request = None
        """ Claim reference.
        Type `FHIRReference` referencing `EligibilityRequest` (represented as `dict` in JSON). """

        self.requestOrganization = None
        """ Responsible organization.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """

        self.requestProvider = None
        """ Responsible practitioner.
        Type `FHIRReference` referencing `Practitioner` (represented as `dict` in JSON). """

        self.ruleset = None
        """ Resource version.
        Type `Coding` (represented as `dict` in JSON). """

        super(EligibilityResponse, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(EligibilityResponse, self).elementProperties()
        # Each tuple (fhirclient convention):
        # (attr name, JSON key, type, is_list, "one of many" group, required)
        js.extend([
            ("created", "created", fhirdate.FHIRDate, False, None, False),
            ("disposition", "disposition", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("organization", "organization", fhirreference.FHIRReference, False, None, False),
            ("originalRuleset", "originalRuleset", coding.Coding, False, None, False),
            ("outcome", "outcome", str, False, None, False),
            ("request", "request", fhirreference.FHIRReference, False, None, False),
            ("requestOrganization", "requestOrganization", fhirreference.FHIRReference, False, None, False),
            ("requestProvider", "requestProvider", fhirreference.FHIRReference, False, None, False),
            ("ruleset", "ruleset", coding.Coding, False, None, False),
        ])
        return js
from . import coding
from . import fhirdate
from . import fhirreference
from . import identifier
| bsd-3-clause | b74142ac30c59f4146ca342d46861055 | 37.52809 | 110 | 0.614465 | 4.351523 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/tools/tool_libs/genomic_datagen.py | 1 | 11364 | #! /bin/env python
#
# Template for RDR tool python program.
#
import argparse
import csv
import logging
import os
import sys
from rdr_service import clock
from rdr_service.dao.genomic_datagen_dao import GenomicDataGenRunDao
from rdr_service.services.genomic_datagen import ParticipantGenerator, GeneratorOutputTemplate, ManifestGenerator
from rdr_service.services.system_utils import setup_logging, setup_i18n
from rdr_service.tools.tool_libs import GCPProcessContext
from rdr_service.tools.tool_libs.tool_base import ToolBase
_logger = logging.getLogger("rdr_logger")
# Tool_cmd and tool_desc name are required.
# Remember to add/update bash completion in 'tool_lib/tools.bash'
tool_cmd = "genomic_datagen"
tool_desc = "Genomic participant/manifest generator tool"
class ParticipantGeneratorTool(ToolBase):
    """Tool command that creates synthetic genomic participants (or just
    re-exports output for a previous run) and writes the result to a local CSV.
    """

    def run(self):
        """Run the generator; returns a process exit code (0 success, 1 error)."""
        # Hard safety stop: synthetic data must never be generated in prod.
        if self.args.project == 'all-of-us-rdr-prod':
            _logger.error(f'Participant generator cannot be used on project: {self.args.project}')
            return 1

        self.gcp_env.activate_sql_proxy()
        # Timestamp shared by all output file names produced by this invocation.
        now_formatted = clock.CLOCK.now().strftime("%Y-%m-%d-%H-%M-%S")
        datagen_run_dao = GenomicDataGenRunDao()

        def _build_external_values(row_dict):
            # Strip the spec-file control columns, then coerce numeric strings
            # to ints; the remaining key/values are passed through to the
            # generator as template substitution values.
            excluded_keys = ['participant_count', 'end_to_end_start', 'template_name']
            for key in excluded_keys:
                del row_dict[key]
            for key, value in row_dict.items():
                if value.isnumeric():
                    row_dict[key] = int(value)
            return row_dict

        # Mode 1: only export output for an existing datagen run id.
        if self.args.output_only_run_id:
            template_output = GeneratorOutputTemplate(
                output_template_name=self.args.output_template_name,
                output_run_id=self.args.output_only_run_id
            )
            generator_output = template_output.run_output_creation()
            file_name = f'datagen_run_id_{self.args.output_only_run_id}_{now_formatted}.csv'
            output_local_csv(
                filename=file_name,
                data=generator_output
            )
            output_path = f'{os.getcwd()}/{file_name}'
            _logger.info("File Created: " + output_path)
            return 0  # bypass generator

        # Mode 2: only export output for a comma-separated list of sample ids.
        if self.args.output_only_sample_ids:
            samples_id_list = []
            for sample in self.args.output_only_sample_ids.split(','):
                samples_id_list.append(sample.strip())
            template_output = GeneratorOutputTemplate(
                output_template_name=self.args.output_template_name,
                output_sample_ids=samples_id_list
            )
            generator_output = template_output.run_output_creation()
            file_name = f'datagen_sample_ids_{now_formatted}.csv'
            output_local_csv(
                filename=file_name,
                data=generator_output
            )
            output_path = f'{os.getcwd()}/{file_name}'
            _logger.info("File Created: " + output_path)
            return 0  # bypass generator

        # Mode 3: generate participants from a CSV spec file, one batch per row.
        if self.args.spec_path:
            if not os.path.exists(self.args.spec_path):
                _logger.error(f'File {self.args.spec_path} was not found.')
                return 1
            # utf-8-sig tolerates a BOM from Excel-exported spec files.
            with ParticipantGenerator(
                logger=_logger
            ) as participant_generator:
                with open(self.args.spec_path, encoding='utf-8-sig') as file:
                    csv_reader = csv.DictReader(file)
                    for row in csv_reader:
                        participant_generator.run_participant_creation(
                            num_participants=int(row['participant_count']),
                            template_type=row['template_name'],
                            external_values=_build_external_values(row)
                        )
            # Export the output for the run that was just created.
            current_run_id = datagen_run_dao.get_max_run_id()[0]
            template_output = GeneratorOutputTemplate(
                output_template_name=self.args.output_template_name,
                output_run_id=current_run_id
            )
            generator_output = template_output.run_output_creation()
            file_name = f'datagen_run_id_{current_run_id}_{now_formatted}.csv'
            output_local_csv(
                filename=file_name,
                data=generator_output
            )
            output_path = f'{os.getcwd()}/{file_name}'
            _logger.info("File Created: " + output_path)
            return 0
class ManifestGeneratorTool(ToolBase):
    """Tool command that builds a genomic manifest (optionally restricted to a
    list of sample ids) and writes it to a local CSV file.
    """

    def run(self):
        """Run the manifest generator; returns 0 when a manifest was written, 1 otherwise."""
        # Hard safety stop: never run against the production project.
        if self.args.project == 'all-of-us-rdr-prod':
            _logger.error(f'Manifest generator cannot be used on project: {self.args.project}')
            return 1

        self.gcp_env.activate_sql_proxy()
        server_config = self.get_server_config()

        # Keyword arguments for ManifestGenerator; filled in from CLI args below.
        manifest_params = {
            "template_name": None,
            "sample_ids": None,
            "cvl_site_id": None,
            "update_samples": self.args.update_samples,
            "logger": _logger,
        }

        if self.args.manifest_template:
            manifest_params["template_name"] = self.args.manifest_template

        if self.args.sample_id_file:
            if not os.path.exists(self.args.sample_id_file):
                _logger.error(f'File {self.args.sample_id_file} was not found.')
                return 1
            # One sample id per line; only the first column is read.
            # utf-8-sig tolerates a BOM from Excel-exported files.
            with open(self.args.sample_id_file, encoding='utf-8-sig') as file:
                csv_reader = csv.reader(file)
                sample_ids = []
                for row in csv_reader:
                    sample_ids.append(row[0])
            manifest_params["sample_ids"] = sample_ids

        if self.args.cvl_site_id:
            manifest_params["cvl_site_id"] = self.args.cvl_site_id

        # Config stores the prefix as a one-element list.
        if server_config.get('biobank_id_prefix'):
            manifest_params['biobank_id_prefix'] = server_config.get('biobank_id_prefix')[0]

        # Execute the manifest generator process or the job controller
        with ManifestGenerator(**manifest_params) as manifest_generator:
            _logger.info("Running Manifest Generator...")
            results = manifest_generator.generate_manifest_data()
            _logger.info(results['status'])
            _logger.info(results['message'])

            if results['manifest_data']:
                # Destination defaults to the CWD and the generator-supplied
                # file name, each overridable from the command line.
                if self.args.output_manifest_directory:
                    output_path = self.args.output_manifest_directory + "/"
                else:
                    output_path = os.getcwd() + "/"
                if self.args.output_manifest_filename:
                    output_path += self.args.output_manifest_filename
                else:
                    output_path += results['output_filename']
                _logger.info("Output path: " + output_path)
                # write file
                output_local_csv(output_path, results['manifest_data'])
                _logger.info("File Created: " + output_path)
                return 0
        # No manifest data produced (or generation failed) -> error exit code.
        return 1
def output_local_csv(filename, data):
    """Write a list of dicts to a local CSV file.

    The header row is taken from the keys of the first dict, so all rows are
    expected to share the same keys.

    :param filename: Destination path; parent directories are created as needed.
    :param data: Non-empty list of dicts, one dict per CSV row.
    :raises ValueError: If ``data`` is empty (there is no header to derive).
    """
    if not data:
        # Previously this crashed with an opaque IndexError on data[0].
        raise ValueError("output_local_csv requires at least one row of data")
    # Create output path if it doesn't exist
    directory = os.path.dirname(filename)
    if directory:
        os.makedirs(directory, exist_ok=True)
    # newline='' is required by the csv module so it can control line endings
    # itself (otherwise Windows produces blank rows between records).
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=list(data[0]))
        writer.writeheader()
        writer.writerows(data)
def get_datagen_process_for_run(args, gcp_env):
    """Return the tool instance that handles ``args.process``, or None if unknown.

    Only the requested tool is instantiated; the original built both tool
    objects eagerly just to select one of them.

    :param args: Parsed command-line namespace; ``args.process`` selects the tool.
    :param gcp_env: Active GCP environment/context passed to the tool.
    """
    datagen_map = {
        'participant_generator': ParticipantGeneratorTool,
        'manifest_generator': ManifestGeneratorTool,
    }
    tool_class = datagen_map.get(args.process)
    return tool_class(args, gcp_env) if tool_class is not None else None
def run():
    """Entry point for the genomic_datagen tool.

    Sets up logging/i18n, parses the command line and dispatches to the
    selected generator sub-command.

    :return: Process exit code (0 on success, 1 on error).
    """
    # Set global debug value and setup application logging.
    setup_logging(
        _logger, tool_cmd, "--debug" in sys.argv, "{0}.log".format(tool_cmd) if "--log-file" in sys.argv else None
    )
    setup_i18n()

    # Setup program arguments.
    parser = argparse.ArgumentParser(prog=tool_cmd, description=tool_desc)
    parser.add_argument("--debug", help="enable debug output", default=False, action="store_true")  # noqa
    parser.add_argument("--log-file", help="write output to a log file", default=False, action="store_true")  # noqa
    parser.add_argument("--project", help="gcp project name", default="localhost")  # noqa
    parser.add_argument("--account", help="pmi-ops account", default=None)
    parser.add_argument("--service-account", help="gcp iam service account", default=None)  # noqa

    subparser = parser.add_subparsers(help='', dest='process')

    participants = subparser.add_parser("participant_generator")
    participants.add_argument("--output-only-run-id", help="outputs only members associated with run id in "
                                                           "datagen_run table", default=None)  # noqa
    participants.add_argument("--output-only-sample-ids", help="outputs only members with sample ids attached to "
                                                               "members in the datagen_member_run table",
                              default=None)  # noqa
    participants.add_argument("--spec-path", help="path to the request form", default=None)  # noqa
    participants.add_argument("--test-project", help="type of project being tested ie. 'cvl'", default='cvl',
                              required=True)  # noqa
    participants.add_argument("--output-template-name", help="template name for output type, "
                                                             "specified in datagen_output_template",
                              default='default', required=True)  # noqa

    manifest = subparser.add_parser("manifest_generator")
    manifest.add_argument("--manifest-template", help="which manifest to generate",
                          default=None,
                          required=True)  # noqa
    manifest.add_argument("--sample-id-file", help="path to the list of sample_ids to include in manifest. "
                                                   "Leave blank for End-to-End manifest (pulls all eligible samples)",
                          default=None)  # noqa
    manifest.add_argument("--update-samples",
                          help="update the result state and manifest job run id field on completion",
                          default=False, required=False, action="store_true")  # noqa
    manifest.add_argument("--output-manifest-directory", help="local output directory for the generated manifest"
                          , default=None)  # noqa
    manifest.add_argument("--output-manifest-filename", help="what to name the output file",
                          default=None, required=False)  # noqa
    manifest.add_argument("--cvl-site-id", help="cvl site to pass to manifest query",
                          default=None, required=False)  # noqa

    args = parser.parse_args()

    with GCPProcessContext(tool_cmd, args.project, args.account, args.service_account) as gcp_env:
        try:
            datagen_process = get_datagen_process_for_run(args, gcp_env)
            if datagen_process is None:
                # Guard against a missing/unknown sub-command instead of
                # crashing with AttributeError on None.run() below.
                _logger.error(f'Unknown process "{args.process}". For help use "genomic_datagen --help".')
                exit_code = 1
            else:
                exit_code = datagen_process.run()
        # pylint: disable=broad-except
        except Exception as e:
            # Log at error level (was info) and fix the "occured" typo.
            _logger.error(f'Error has occurred, {e}. For help use "genomic_datagen --help".')
            exit_code = 1
        return exit_code
# --- Main Program Call ---
# Propagate the tool's exit code to the shell.
if __name__ == "__main__":
    sys.exit(run())
| bsd-3-clause | f50f0e3ed6a54b0f383225bca0932a6f | 39.297872 | 118 | 0.582717 | 4.124864 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/tools/tool_libs/data-dictionary.py | 1 | 1056 | import argparse
from rdr_service.config import DATA_DICTIONARY_DOCUMENT_ID
from rdr_service.services.data_dictionary_updater import DataDictionaryUpdater
from rdr_service.tools.tool_libs.tool_base import cli_run, logger, ToolBase
tool_cmd = 'data-dictionary'
tool_desc = "Supplemental tool for managing RDR's data-dictionary (for when the deploy fails to update it)."
class DataDictionaryScript(ToolBase):
    """Tool command that pushes data-dictionary updates to the configured
    Google document (normally done during deploy; this is the manual fallback).
    """

    def run_process(self):
        """Run the updater against the document id found in the server config."""
        with self.initialize_process_context() as gcp_env:
            self.gcp_env = gcp_env
            server_config = self.get_server_config()
            # The changes are labeled with the --rdr-version CLI argument.
            updater = DataDictionaryUpdater(server_config[DATA_DICTIONARY_DOCUMENT_ID], self.args.rdr_version)
            updater.run_update_in_tool(self, logger)
def add_additional_arguments(parser: argparse.ArgumentParser):
    """Register the data-dictionary tool's extra command-line options on *parser*."""
    version_help = 'Version number of the RDR release to label the changes with in the data dictionary.'
    parser.add_argument('--rdr-version', help=version_help)
def run():
    """Entry point: delegate to the shared ToolBase CLI runner."""
    cli_run(tool_cmd, tool_desc, DataDictionaryScript, add_additional_arguments)
| bsd-3-clause | a5d9ad149de33ab9e41695a02ad0b9b2 | 34.2 | 108 | 0.731061 | 3.666667 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/806f7c2031a5_add_order_finalized_time.py | 1 | 2116 | """Add order finalized time
Revision ID: 806f7c2031a5
Revises: 5ec19e6b8726
Create Date: 2019-10-23 13:19:45.264759
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '806f7c2031a5'
down_revision = '5ec19e6b8726'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. ``upgrade_rdr``)."""
    handler = globals()[f"upgrade_{engine_name}"]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. ``downgrade_rdr``)."""
    handler = globals()[f"downgrade_{engine_name}"]
    handler()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Record when a biobank order was finalized, on both the live table and
    # its history mirror.
    op.add_column('biobank_history', sa.Column('finalized_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
    op.add_column('biobank_order', sa.Column('finalized_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade_rdr: drop the finalized_time columns again.
    op.drop_column('biobank_order', 'finalized_time')
    op.drop_column('biobank_history', 'finalized_time')
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 96e0bf21544cab56ccc6e0285a45a794 | 33.129032 | 125 | 0.749055 | 3.56229 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/devicemetric.py | 1 | 5021 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/DeviceMetric) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class DeviceMetric(domainresource.DomainResource):
    """ Measurement, calculation or setting capability of a medical device.

    Describes a measurement, calculation or setting capability of a medical
    device.

    NOTE: auto-generated from the FHIR DSTU2 (1.0.2) StructureDefinition —
    structural changes belong in the generator, not this file.
    """

    # FHIR resource type name used by the base class for (de)serialization.
    resource_name = "DeviceMetric"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.calibration = None
        """ Describes the calibrations that have been performed or that are
        required to be performed.
        List of `DeviceMetricCalibration` items (represented as `dict` in JSON). """

        self.category = None
        """ measurement | setting | calculation | unspecified.
        Type `str`. """

        self.color = None
        """ black | red | green | yellow | blue | magenta | cyan | white.
        Type `str`. """

        self.identifier = None
        """ Unique identifier of this DeviceMetric.
        Type `Identifier` (represented as `dict` in JSON). """

        self.measurementPeriod = None
        """ Describes the measurement repetition time.
        Type `Timing` (represented as `dict` in JSON). """

        self.operationalStatus = None
        """ on | off | standby.
        Type `str`. """

        self.parent = None
        """ Describes the link to the parent DeviceComponent.
        Type `FHIRReference` referencing `DeviceComponent` (represented as `dict` in JSON). """

        self.source = None
        """ Describes the link to the source Device.
        Type `FHIRReference` referencing `Device` (represented as `dict` in JSON). """

        self.type = None
        """ Type of metric.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.unit = None
        """ Unit of metric.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        # Base class performs the actual population/validation from jsondict.
        super(DeviceMetric, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (python name, json name, type, is_list, "of_many" group, required).
        js = super(DeviceMetric, self).elementProperties()
        js.extend([
            ("calibration", "calibration", DeviceMetricCalibration, True, None, False),
            ("category", "category", str, False, None, True),
            ("color", "color", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, False, None, True),
            ("measurementPeriod", "measurementPeriod", timing.Timing, False, None, False),
            ("operationalStatus", "operationalStatus", str, False, None, False),
            ("parent", "parent", fhirreference.FHIRReference, False, None, False),
            ("source", "source", fhirreference.FHIRReference, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
            ("unit", "unit", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
from . import backboneelement
class DeviceMetricCalibration(backboneelement.BackboneElement):
    """ Describes the calibrations that have been performed or that are required to
    be performed.

    NOTE: auto-generated from the FHIR DSTU2 (1.0.2) StructureDefinition.
    """

    # FHIR element type name used by the base class for (de)serialization.
    resource_name = "DeviceMetricCalibration"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.state = None
        """ not-calibrated | calibration-required | calibrated | unspecified.
        Type `str`. """

        self.time = None
        """ Describes the time last calibration has been performed.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.type = None
        """ unspecified | offset | gain | two-point.
        Type `str`. """

        # Base class performs the actual population/validation from jsondict.
        super(DeviceMetricCalibration, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (python name, json name, type, is_list, "of_many" group, required).
        js = super(DeviceMetricCalibration, self).elementProperties()
        js.extend([
            ("state", "state", str, False, None, False),
            ("time", "time", fhirdate.FHIRDate, False, None, False),
            ("type", "type", str, False, None, False),
        ])
        return js
from . import codeableconcept
from . import fhirdate
from . import fhirreference
from . import identifier
from . import timing
| bsd-3-clause | 6d7b82965e1fd0911ef4b97d541e98a3 | 37.037879 | 103 | 0.609042 | 4.459147 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/2c3a71f9fc04_add_genomic_set_member_columns_for_aw3_.py | 1 | 3524 | """add_genomic_set_member columns_for_aw3_aw4
Revision ID: 2c3a71f9fc04
Revises: c069abb92cc0
Create Date: 2020-08-25 08:57:17.987756
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '2c3a71f9fc04'
down_revision = 'c069abb92cc0'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. ``upgrade_rdr``)."""
    handler = globals()[f"upgrade_{engine_name}"]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. ``downgrade_rdr``)."""
    handler = globals()[f"downgrade_{engine_name}"]
    handler()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Replace the array/WGS-specific AW3 job-run columns with shared
    # AW3/AW4 columns on genomic_set_member and its history mirror.
    op.add_column('genomic_set_member', sa.Column('aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member', sa.Column('aw4_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('aw4_manifest_job_run_id', sa.Integer(), nullable=True))
    # Drop the FKs that pointed at the old columns before removing them.
    op.drop_constraint('genomic_set_member_ibfk_23', 'genomic_set_member', type_='foreignkey')
    op.drop_constraint('genomic_set_member_ibfk_24', 'genomic_set_member', type_='foreignkey')
    # New FKs to genomic_job_run; names are auto-generated by the database.
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['aw3_manifest_job_run_id'], ['id'])
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['aw4_manifest_job_run_id'], ['id'])
    op.drop_column('genomic_set_member', 'wgs_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'wgs_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member', 'arr_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'arr_aw3_manifest_job_run_id')
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the array/WGS-specific AW3 job-run columns.
    op.add_column('genomic_set_member', sa.Column('arr_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('arr_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column('genomic_set_member', sa.Column('wgs_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('wgs_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    # NOTE(review): drop_constraint(None, ...) is an Alembic autogen artifact —
    # MySQL requires an explicit constraint name here, so this downgrade will
    # fail as written; the generated FK names from upgrade_rdr need to be
    # filled in before this revision can be downgraded.
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.create_foreign_key('genomic_set_member_ibfk_24', 'genomic_set_member', 'genomic_job_run', ['wgs_aw3_manifest_job_run_id'], ['id'])
    op.create_foreign_key('genomic_set_member_ibfk_23', 'genomic_set_member', 'genomic_job_run', ['arr_aw3_manifest_job_run_id'], ['id'])
    op.drop_column('genomic_set_member', 'aw4_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'aw4_manifest_job_run_id')
    op.drop_column('genomic_set_member', 'aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'aw3_manifest_job_run_id')
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 4e3ed6020d55720fa7ffbc3dbf3b3d2d | 42.506173 | 158 | 0.695516 | 2.914806 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/codeableconcept.py | 1 | 1591 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/CodeableConcept) on 2017-03-22.
# 2017, SMART Health IT.
from . import element
class CodeableConcept(element.Element):
    """ Concept - reference to a terminology or just text.

    A concept that may be defined by a formal reference to a terminology or
    ontology or may be provided by text.

    NOTE: auto-generated from the FHIR STU3 (3.0.0) StructureDefinition —
    structural changes belong in the generator, not this file.
    """

    # FHIR element type name used by the base class for (de)serialization.
    resource_type = "CodeableConcept"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.coding = None
        """ Code defined by a terminology system.
        List of `Coding` items (represented as `dict` in JSON). """

        self.text = None
        """ Plain text representation of the concept.
        Type `str`. """

        # Base class performs the actual population/validation from jsondict.
        super(CodeableConcept, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple: (python name, json name, type, is_list, "of_many" group, required).
        js = super(CodeableConcept, self).elementProperties()
        js.extend([
            ("coding", "coding", coding.Coding, True, None, False),
            ("text", "text", str, False, None, False),
        ])
        return js
import sys
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
| bsd-3-clause | 58b57eb2c1a804347ab7bfbe124433d1 | 30.82 | 107 | 0.620365 | 4.111111 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/rdr_client/dv_order.py | 1 | 2337 | """Simple client demonstrating how to create and retrieve a participant"""
import logging
import pprint
from rdr_service.main_util import configure_logging
from rdr_service.rdr_client.client import Client
def main():
    """Demo: PUT a hard-coded SupplyRequest (DV order) for a fake participant.

    The payload mimics what the Vibrent platform sends for a salivary kit
    order; all participant data here is fake.
    """
    client = Client()
    body = {
        "authoredOn": "2019-02-12",
        # Contained resources referenced below via "#<id>" local references.
        "contained": [
            {"id": "supplier-1", "name": "GenoTek", "resourceType": "Organization"},
            {
                "deviceName": [{"name": "GenoTek DNA Kit", "type": "manufacturer-name"}],
                "id": "device-1",
                "identifier": [{"code": "4081", "system": "SKU"}, {"code": "SNOMED CODE TBD", "system": "SNOMED"}],
                "resourceType": "Device",
            },
            {
                "address": [
                    {
                        "city": "FakeVille",
                        "line": ["123 Fake St"],
                        "postalCode": "22155",
                        "state": "VA",
                        "type": "postal",
                        "use": "home",
                    }
                ],
                "id": "847299265",
                "identifier": [{"system": "participantId", "value": "847299265"}],
                "resourceType": "Patient",
            },
        ],
        "deliverFrom": {"reference": "#supplier-1"},
        # NOTE(review): "Patient/#patient-1" and "Patient/patient-1" below do not
        # match any contained id (the Patient is "847299265") — presumably
        # tolerated by the endpoint, but worth confirming.
        "deliverTo": {"reference": "Patient/#patient-1"},
        # Vibrent-specific extensions carried on the order.
        "extension": [
            {"url": "http://vibrenthealth.com/fhir/barcode", "valueString": "AAAA20160121ZZZZ"},
            {"url": "http://vibrenthealth.com/fhir/order-type", "valueString": "salivary pilot"},
            {"url": "http://vibrenthealth.com/fhir/fulfillment-status", "valueString": "shipped"},
        ],
        "identifier": [{"code": "123", "system": "orderId"}, {"code": "B0A0A0A", "system": "fulfillmentId"}],
        "itemReference": {"reference": "#device-1"},
        "quantity": {"value": 1},
        "requester": {"reference": "Patient/patient-1"},
        "resourceType": "SupplyRequest",
        "status": "completed",
        "supplier": {"reference": "#supplier-1"},
        "text": {"div": "....", "status": "generated"},
    }
    response = client.request_json("Participant/P847299265/DvOrder/12347", "PUT", body)
    logging.info(pprint.pformat(response))
# Script entry point: set up logging before issuing the demo request.
if __name__ == "__main__":
    configure_logging()
    main()
| bsd-3-clause | d7361f617ddba55dd76bc9754cb65b4a | 37.95 | 115 | 0.489944 | 3.787682 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/app_util.py | 1 | 16764 | import calendar
import datetime
import email.utils
import flask
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
import logging
from requests.exceptions import RequestException
from time import sleep
from typing import Callable, Collection
import urllib.parse
import netaddr
import pytz
import requests
from flask import request
from werkzeug.exceptions import Forbidden, Unauthorized, GatewayTimeout
from rdr_service import clock, config
from rdr_service.api import base_api
from rdr_service.config import GAE_PROJECT
_GMT = pytz.timezone("GMT")
SCOPE = "https://www.googleapis.com/auth/userinfo.email"
GLOBAL_CLIENT_ID_KEY = 'oauth_client_id'
def handle_database_disconnect(err):
    """Intended to catch DBAPIError's thrown during a request cycle and transform them into 503's.

    If the DBAPIError does not represent an invalidated connection, reraise the error.

    Usage: app.register_error_handler(DBAPIError, handle_database_disconnect)
    """
    if not err.connection_invalidated:
        # Not a dropped connection; let the normal error handling deal with it.
        raise err
    return "DB connection lost, please retry", 503
def auth_required_cron(func):
    """A decorator that ensures that the request is an App Engine cron job.

    :param func: The view function to protect.
    :return: Wrapped view that calls check_cron() before dispatching.
    """
    from functools import wraps

    # wraps preserves func's name/docstring — without it every protected view
    # is named "wrapped", which also causes Flask endpoint-name collisions.
    @wraps(func)
    def wrapped(*args, **kwargs):
        check_cron()
        return func(*args, **kwargs)

    return wrapped
def task_auth_required(func):
    """A decorator that ensures that the request comes from an App Engine task queue.

    Allowed when running locally, or when the App Engine task headers are
    present; otherwise raises Forbidden.

    :param func: The view function to protect.
    """
    from functools import wraps

    # wraps preserves func's name/docstring — without it every protected view
    # is named "wrapped", which also causes Flask endpoint-name collisions.
    @wraps(func)
    def wrapped(*args, **kwargs):
        if GAE_PROJECT == "localhost" or (
            request.headers.get("X-Appengine-Taskname") and "AppEngine-Google" in request.headers.get("User-Agent", "")
        ):
            logging.info("App Engine task request ALLOWED for task endpoint.")
            return func(*args, **kwargs)
        logging.info("User {} NOT ALLOWED for task endpoint".format(get_oauth_id()))
        raise Forbidden()

    return wrapped
def nonprod(func):
    """The decorated function may never run in environments without config.ALLOW_NONPROD_REQUESTS.

    :param func: The view function to protect.
    :raises Forbidden: When the config flag is unset (i.e. in production).
    """
    from functools import wraps

    # wraps preserves func's name/docstring — without it every protected view
    # is named "wrapped", which also causes Flask endpoint-name collisions.
    @wraps(func)
    def wrapped(*args, **kwargs):
        if not config.getSettingJson(config.ALLOW_NONPROD_REQUESTS, False):
            raise Forbidden("Request not allowed in production environment (according to config).")
        return func(*args, **kwargs)

    return wrapped
def check_auth(role_allowed_list):
    """Raises Unauthorized or Forbidden if the current user is not allowed."""
    user_email, user_info = get_validated_user_info()
    user_roles = user_info.get("roles", [])
    # Authorized as soon as any of the user's roles appears in the allowed list.
    if any(role in role_allowed_list for role in user_roles):
        return
    logging.warning(f"User {user_email} has roles {user_info.get('roles')}, but {role_allowed_list} is required")
    raise Forbidden()
def get_auth_token():
    """Return the bearer token from the Authorization header.

    :raises ValueError: If the header is absent or has no token part.
    """
    header = request.headers.get("Authorization", '')
    parts = header.split(' ', 1)
    if len(parts) < 2:
        raise ValueError(f"Invalid Authorization Header: {header}")
    return parts[1]
def get_token_info_response(token, use_tokeninfo=False):
    """Ask Google's OAuth2 v3 API to validate *token*.

    :param token: The bearer token to validate.
    :param use_tokeninfo: Query the ``tokeninfo`` endpoint instead of ``userinfo``.
    :return: The raw ``requests`` response (not decoded).
    """
    endpoint = 'tokeninfo' if use_tokeninfo else 'userinfo'
    query = urllib.parse.urlencode({'access_token': token})
    url = f"https://www.googleapis.com/oauth2/v3/{endpoint}?{query}"
    return requests.get(url)
def get_oauth_id():
    """Returns user email ID if OAUTH token present, or None."""
    '''
    NOTES: 2019-08-15 by tanner and mikey
        currently verifies that the provided token
        is legitimate via google API.
        - performance
            - could be validated locally instead of with API
    '''
    # Fast path: result already cached on flask.g for this request.
    if flask.g and GLOBAL_CLIENT_ID_KEY in flask.g:
        return getattr(flask.g, GLOBAL_CLIENT_ID_KEY)

    # Up to 5 attempts against Google; starts with the userinfo endpoint and
    # falls back to tokeninfo if userinfo misbehaves.
    retries = 5
    use_tokeninfo_endpoint = False
    while retries:
        retries -= 1
        if GAE_PROJECT == 'localhost':  # NOTE: 2019-08-15 mimic devappserver.py behavior
            return config.LOCAL_AUTH_USER
        try:
            token = get_auth_token()
        except ValueError as e:
            # Missing/malformed Authorization header: no identity, no retry.
            logging.info(f"Invalid Authorization Token: {e}")
            return None
        else:
            try:
                response = get_token_info_response(token, use_tokeninfo=use_tokeninfo_endpoint)
            except RequestException as e:  # Catching any connection or decoding errors that could be thrown
                # Transient network problem — fall through to the retry sleep.
                logging.warning(f'Error validating token: {e}')
            else:
                if response.status_code == 200:
                    data = response.json()
                    if use_tokeninfo_endpoint:  # UserInfo doesn't return expiry info :(
                        token_expiry_seconds = data.get('expires_in')
                        logging.info(f'Token expiring in {token_expiry_seconds} seconds')
                    user_email = data.get('email')
                    if user_email is None:
                        # userinfo responded but without an email; retry with
                        # the tokeninfo endpoint instead.
                        logging.error('UserInfo endpoint did not return the email')
                        use_tokeninfo_endpoint = True
                    else:
                        # Cache on flask.g so later calls in this request skip
                        # the Google round-trip.
                        if flask.g:
                            setattr(flask.g, GLOBAL_CLIENT_ID_KEY, user_email)
                        return user_email
                else:
                    logging.info(f"Oauth failure: {response.content} (status: {response.status_code})")
                    if response.status_code in [400, 401]:  # tokeninfo returns 400
                        raise Unauthorized
                    elif not use_tokeninfo_endpoint:
                        logging.error("UserInfo failed, falling back on Tokeninfo")
                        use_tokeninfo_endpoint = True
        # Brief pause before the next attempt.
        sleep(0.25)
        logging.info('Retrying authentication call to Google after failure.')
    # All retries exhausted without a definitive answer from Google.
    raise GatewayTimeout('Google authentication services is not available, try again later.')
def check_cron():
    """Raises Forbidden if the current user is not a cron job."""
    if not request.headers.get("X-Appengine-Cron"):
        logging.info("User {} NOT ALLOWED for cron endpoint".format(get_oauth_id()))
        raise Forbidden()
    logging.info("Appengine-Cron ALLOWED for cron endpoint.")
def lookup_user_info(user_email):
    """Return the configured user-info dict for *user_email*, or None if unknown."""
    all_user_info = config.getSettingJson(config.USER_INFO, {})
    return all_user_info.get(user_email)
def get_account_origin_id():
    """
    Returns the clientId value set in the config for the user.
    :return: Client Id
    """
    auth_email = get_oauth_id()
    # NOTE(review): lookup_user_info returns None for an unknown email, which
    # would make the .get below raise AttributeError — presumably callers only
    # reach this after authentication; confirm.
    user_info = lookup_user_info(auth_email)
    client_id = user_info.get('clientId', None)
    from rdr_service.api_util import DEV_MAIL
    if not client_id:
        if auth_email == DEV_MAIL:
            # TODO: This is a hack because something sets up configs different
            # when running all tests and it doesnt have the clientId key.
            client_id = "example"
    return client_id
def is_self_request():
    """True when the request is an internal loopback call in an environment
    that allows nonprod requests and is not explicitly marked unauthenticated."""
    if request.remote_addr is not None:
        return False
    nonprod_allowed = config.getSettingJson(config.ALLOW_NONPROD_REQUESTS, False)
    return nonprod_allowed and not request.headers.get("unauthenticated")
def get_allowed_ips(user_info):
    """Return the user's IP allow-list as netaddr.IPNetwork objects.

    Returns None when no ranges are configured (meaning: unrestricted).
    Checks the legacy "whitelisted_ip_ranges" key as a fallback.
    """
    ranges = user_info.get("allow_list_ip_ranges") or user_info.get("whitelisted_ip_ranges")
    if not ranges:
        return None
    cidr_strings = ranges.get("ip6", []) + ranges.get("ip4", [])
    return list(map(netaddr.IPNetwork, cidr_strings))
def enforce_ip_allowed(request_ip, allowed_ips):
    """Raise Forbidden when *request_ip* falls outside every allowed range.

    :param request_ip: Client IP address string.
    :param allowed_ips: List of netaddr.IPNetwork ranges; falsy means unrestricted.
    :raises Forbidden: If the IP is not inside any allowed range.
    """
    if not allowed_ips:  # No allowed ips means "don't apply restrictions"
        return
    logging.info("IP RANGES ALLOWED: {}".format(allowed_ips))
    ip = netaddr.IPAddress(request_ip)
    # any() short-circuits; the original built a throwaway list just to test
    # membership with bool([...]).
    if not any(ip in rng for rng in allowed_ips):
        logging.info("IP {} NOT ALLOWED".format(ip))
        raise Forbidden("Client IP not allowed: {}".format(ip))
    logging.info("IP {} ALLOWED".format(ip))
def get_allowed_appids(user_info):
    """Return the user's app-id allow-list, checking the legacy
    "whitelisted_appids" key as a fallback (None when neither is set)."""
    allowed = user_info.get("allow_list_appids")
    if not allowed:
        allowed = user_info.get("whitelisted_appids")
    return allowed
def enforce_appid_allowed(request_app_id, allowed_appids):
    """Raise Forbidden unless ``request_app_id`` is one of ``allowed_appids``.

    A falsy ``allowed_appids`` means no restriction is applied.
    """
    if not allowed_appids:  # No allowed_appids means "don't apply restrictions"
        return
    if request_app_id and request_app_id in allowed_appids:
        logging.info("APP ID {} ALLOWED".format(request_app_id))
        return
    # Either the app id was missing entirely, or it isn't in the allowed set.
    if request_app_id:
        logging.info("APP ID {} NOT FOUND IN {}".format(request_app_id, allowed_appids))
    else:
        logging.info("NO APP ID FOUND WHEN REQUIRED TO BE ONE OF: {}".format(allowed_appids))
    raise Forbidden()
def add_headers(response):
    """Add uniform headers to all API responses.

    All responses are JSON, so we tag them as such at the app level to provide uniform protection
    against content-sniffing-based attacks.
    """
    # Current time rendered in GMT for the Date header.
    now_gmt = pytz.utc.localize(clock.CLOCK.now()).astimezone(_GMT)
    headers = response.headers
    headers["Content-Disposition"] = 'attachment; filename="f.txt"'
    headers["X-Content-Type-Options"] = "nosniff"
    headers["Content-Type"] = "application/json; charset=utf-8"  # override to add charset
    headers["Date"] = email.utils.formatdate(calendar.timegm(now_gmt.timetuple()), usegmt=True)
    headers["Pragma"] = "no-cache"
    headers["Cache-control"] = "no-cache, must-revalidate"
    # Expire at some date in the past: the epoch.
    headers["Expires"] = email.utils.formatdate(0.0, usegmt=True)
    return response
def request_logging():
    """Some uniform logging of request characteristics before any checks are applied."""
    https_flag = request.environ.get("HTTPS")
    logging.info("Request protocol: HTTPS={}".format(https_flag))
def auth_required(role_allowed_list):
    """A decorator that keeps the function from being called without auth.

    role_allowed_list can be a string or list of strings specifying one or
    more roles that are allowed to call the function.

    :raises AssertionError: if called with an empty role_allowed_list.
    """
    import functools  # local import keeps this block self-contained

    if not role_allowed_list:
        raise AssertionError("Can't call auth_required with empty role_allowed_list.")
    if not isinstance(role_allowed_list, list):
        role_allowed_list = [role_allowed_list]

    def auth_required_wrapper(func):
        # functools.wraps preserves the view function's __name__/__doc__ so
        # logging and introspection don't all report "wrapped".
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            appid = GAE_PROJECT
            request.log_record = base_api.log_api_request()
            # Only enforce HTTPS and auth for external requests; requests made for data generation
            # are allowed through (when enabled).
            acceptable_hosts = ("None", "testbed-test", "testapp", "localhost", "127.0.0.1")
            if not is_self_request():
                if request.scheme.lower() != "https" and appid not in acceptable_hosts:
                    raise Unauthorized(f"HTTPS is required for {appid}", www_authenticate='Bearer realm="rdr"')
                check_auth(role_allowed_list)
            request.logged = False
            result = func(*args, **kwargs)
            if request.logged is False:
                try:
                    base_api.log_api_request(log=request.log_record)
                except RuntimeError:
                    # Unittests don't always setup a valid flask request context.
                    pass
            return result

        return wrapped

    return auth_required_wrapper
def restrict_to_gae_project(allowed_project_list):
    """
    A decorator for restricting access of a method
    to a particular Google App Engine Project
    :param allowed_project_list: list of GAE ids, i.e. 'all-of-us-rdr-stable', etc.
    :return: function result or Forbidden
    """
    import functools  # local import keeps this block self-contained

    def restriction_function_wrapper(func):
        # functools.wraps preserves the wrapped function's metadata.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            app_id = GAE_PROJECT
            # Check app_id against the registered environments
            if app_id not in allowed_project_list:
                raise Forbidden(f'This operation is forbidden on {app_id}')
            return func(*args, **kwargs)

        return inner

    return restriction_function_wrapper
def get_validated_user_info():
    """Returns a valid (user email, user info), or raises Unauthorized or Forbidden."""
    user_email = get_oauth_id()

    # Allow clients to simulate an unauthenticated request (for testing)
    # because we haven't found another way to create an unauthenticated request
    # when using dev_appserver. When client tests are checking to ensure that an
    # unauthenticated requests gets rejected, they helpfully add this header.
    # The `application_id` check ensures this feature only works in dev_appserver.
    if GAE_PROJECT == 'localhost' and request.headers.get("unauthenticated"):
        user_email = None

    if user_email is None:
        raise Unauthorized("No OAuth user found.")

    user_info = lookup_user_info(user_email)
    if not user_info:
        logging.info(f"User {user_email} NOT ALLOWED")
        raise Forbidden()

    # Prefer the proxied client IP set by App Engine, if present.
    addr = request.headers.get('X-Appengine-User-Ip', request.remote_addr)
    enforce_ip_allowed(addr, get_allowed_ips(user_info))
    enforce_appid_allowed(request.headers.get("X-Appengine-Inbound-Appid"), get_allowed_appids(user_info))
    logging.info(f"User {user_email} ALLOWED")
    return (user_email, user_info)
class ObjectView(object):
    """Wrap a dict so its keys can be read and written as object attributes."""

    def __init__(self, d):
        # Reusing the mapping as the instance __dict__ makes every key an attribute.
        self.__dict__ = d
class ObjDict(dict):
    """Subclass dict to treat new dicts like objects"""

    def __getattr__(self, name):
        # Missing keys surface as AttributeError so attribute semantics hold.
        if name not in self:
            raise AttributeError("No such attribute: " + name)
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        if name not in self:
            raise AttributeError("No such attribute: " + name)
        del self[name]
def datetime_as_naive_utc(value):
    """Convert ``value`` to a naive (tzinfo-free) datetime expressed in UTC.

    Naive datetimes are assumed to already be in UTC and are returned unchanged.
    :raises TypeError: if ``value`` is not a datetime.datetime.
    """
    if not isinstance(value, datetime.datetime):
        raise TypeError("datetime_as_naive_utc() only works on datetime.datetime values")
    if value.tzinfo is None:
        return value
    # datetime.timezone.utc is equivalent to pytz.UTC for astimezone() and
    # removes the third-party dependency from this function.
    return value.astimezone(datetime.timezone.utc).replace(tzinfo=None)
def is_care_evo_and_not_prod():
    """True when the caller originates from CareEvolution and this is not the production project."""
    in_prod = GAE_PROJECT == "all-of-us-rdr-prod"
    return not in_prod and get_account_origin_id() == "careevolution"
def install_rate_limiting(app):
    """Attach a flask-limiter Limiter to ``app`` using configured storage and rate limits."""
    storage_uri = config.getSettingJson('cache_storage_location', default='memory://')
    rate_limit = config.getSettingJson('default_rate_limit', default='15/second')
    # Key requests by authenticated identity when available, else by remote address.
    Limiter(
        app,
        key_func=lambda: get_oauth_id() or get_remote_address(),
        default_limits=[rate_limit],
        storage_uri=storage_uri,
        in_memory_fallback_enabled=True  # Use local memory if cache not found (throws an error otherwise)
    )
class BatchManager:
    """Useful for applying a function to a batch of objects.

    Objects are collected with :meth:`add`; each time a full batch accumulates
    the callback is invoked with it. Any remaining partial batch is flushed
    when the context manager exits.
    """

    def __init__(self, batch_size: int, callback: Callable[[Collection], None]):
        """
        Initializes the instance of the batch manager.

        :param batch_size: The number of items to collect before sending them to the callback.
        :param callback: A method that is meant to process the collected batches.
            It should only have one parameter to accept the batch of objects and should not return any value.
        """
        self._batch_size = batch_size
        self._callback = callback
        self._collected_objects = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Flush any remaining partial batch, but don't invoke the callback
        # with an empty list when everything has already been sent.
        if self._collected_objects:
            self._callback(self._collected_objects)
            self._collected_objects = []

    def add(self, obj):
        """
        Adds an object to the current batch. If adding the object causes the number of objects to reach the batch_size
        then the callback is called with the objects (including the one that was just added).
        """
        self._collected_objects.append(obj)
        if len(self._collected_objects) == self._batch_size:
            self._callback(self._collected_objects)
            self._collected_objects = []
def is_datetime_equal(first: datetime, second: datetime, difference_allowed_seconds: int = 0) -> bool:
    """
    Compares two datetimes to determine whether they're equivalent or not.
    :param first: First date.
    :param second: Second date.
    :param difference_allowed_seconds: The number of seconds that the two dates can be different before they're
        determined to not be a match. Defaults to 0.
    :return: False if they're more than the specified number of seconds apart.
    """
    if first is None or second is None:
        # Two missing dates match each other; a missing date never matches a real one.
        return first is second
    delta_seconds = abs((first - second).total_seconds())
    return delta_seconds <= difference_allowed_seconds
| bsd-3-clause | 4d3e96e2fe000dcd4f6d092171852373 | 35.522876 | 119 | 0.64561 | 4.060063 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/f69e4a978a1f_adding_branching_logic_to_survey_.py | 1 | 1949 | """adding branching logic to survey questions
Revision ID: f69e4a978a1f
Revises: 615d1f3a5cd4
Create Date: 2022-03-14 14:04:08.341608
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'f69e4a978a1f'
down_revision = '615d1f3a5cd4'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('survey_question', sa.Column('branching_logic', sa.String(length=1024), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('survey_question', 'branching_logic')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| bsd-3-clause | b6129f43b25dfbc92ab394262a5ba451 | 31.483333 | 125 | 0.752181 | 3.582721 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/library.py | 1 | 8296 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Library) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class Library(domainresource.DomainResource):
""" Represents a library of quality improvement components.
The Library resource is a general-purpose container for knowledge asset
definitions. It can be used to describe and expose existing knowledge
assets such as logic libraries and information model descriptions, as well
as to describe a collection of knowledge assets.
"""
resource_type = "Library"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.approvalDate = None
""" When the library was approved by publisher.
Type `FHIRDate` (represented as `str` in JSON). """
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.content = None
""" Contents of the library, either embedded or referenced.
List of `Attachment` items (represented as `dict` in JSON). """
self.contributor = None
""" A content contributor.
List of `Contributor` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.dataRequirement = None
""" What data is referenced by this library.
List of `DataRequirement` items (represented as `dict` in JSON). """
self.date = None
""" Date this was last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the library.
Type `str`. """
self.effectivePeriod = None
""" When the library is expected to be used.
Type `Period` (represented as `dict` in JSON). """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.identifier = None
""" Additional identifier for the library.
List of `Identifier` items (represented as `dict` in JSON). """
self.jurisdiction = None
""" Intended jurisdiction for library (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.lastReviewDate = None
""" When the library was last reviewed.
Type `FHIRDate` (represented as `str` in JSON). """
self.name = None
""" Name for this library (computer friendly).
Type `str`. """
self.parameter = None
""" Parameters defined by the library.
List of `ParameterDefinition` items (represented as `dict` in JSON). """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this library is defined.
Type `str`. """
self.relatedArtifact = None
""" Additional documentation, citations, etc..
List of `RelatedArtifact` items (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.title = None
""" Name for this library (human friendly).
Type `str`. """
self.topic = None
""" E.g. Education, Treatment, Assessment, etc.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" logic-library | model-definition | asset-collection | module-
definition.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.url = None
""" Logical URI to reference this library (globally unique).
Type `str`. """
self.usage = None
""" Describes the clinical usage of the library.
Type `str`. """
self.useContext = None
""" Context the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the library.
Type `str`. """
super(Library, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Library, self).elementProperties()
js.extend([
("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("content", "content", attachment.Attachment, True, None, False),
("contributor", "contributor", contributor.Contributor, True, None, False),
("copyright", "copyright", str, False, None, False),
("dataRequirement", "dataRequirement", datarequirement.DataRequirement, True, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
("experimental", "experimental", bool, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
("name", "name", str, False, None, False),
("parameter", "parameter", parameterdefinition.ParameterDefinition, True, None, False),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("relatedArtifact", "relatedArtifact", relatedartifact.RelatedArtifact, True, None, False),
("status", "status", str, False, None, True),
("title", "title", str, False, None, False),
("topic", "topic", codeableconcept.CodeableConcept, True, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
("url", "url", str, False, None, False),
("usage", "usage", str, False, None, False),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import contributor
except ImportError:
contributor = sys.modules[__package__ + '.contributor']
try:
from . import datarequirement
except ImportError:
datarequirement = sys.modules[__package__ + '.datarequirement']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import parameterdefinition
except ImportError:
parameterdefinition = sys.modules[__package__ + '.parameterdefinition']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| bsd-3-clause | ecca9d52ed810102a63e69f21252f65f | 37.948357 | 103 | 0.603544 | 4.474649 | false | false | false | false |
django/django-localflavor | localflavor/nl/forms.py | 3 | 3623 | """NL-specific Form helpers."""
import re
from django import forms
from .nl_provinces import PROVINCE_CHOICES
from .validators import NLBSNFieldValidator, NLLicensePlateFieldValidator, NLZipCodeFieldValidator
class NLZipCodeField(forms.CharField):
"""A Dutch zip code field."""
default_validators = [NLZipCodeFieldValidator()]
def clean(self, value):
if isinstance(value, str):
value = value.upper().replace(' ', '')
if len(value) == 6:
value = '%s %s' % (value[:4], value[4:])
return super().clean(value)
class NLProvinceSelect(forms.Select):
"""A Select widget that uses a list of provinces of the Netherlands as it's choices."""
def __init__(self, attrs=None):
super().__init__(attrs, choices=PROVINCE_CHOICES)
class NLBSNFormField(forms.CharField):
"""
A Dutch social security number (BSN) field.
https://nl.wikipedia.org/wiki/Burgerservicenummer
Note that you may only process the BSN if you have a legal basis to do so!
.. versionadded:: 1.6
"""
default_validators = [NLBSNFieldValidator()]
def __init__(self, **kwargs):
kwargs['max_length'] = 9
super().__init__(**kwargs)
class NLLicensePlateFormField(forms.CharField):
"""
A Dutch license plate field.
https://www.rdw.nl/
https://nl.wikipedia.org/wiki/Nederlands_kenteken
.. versionadded:: 2.1
"""
default_validators = [NLLicensePlateFieldValidator()]
SANITIZE_REGEXS = {
"sidecode1": re.compile(r"^([A-Z]{2})([0-9]{2})([0-9]{2})$"), # AA-99-99
"sidecode2": re.compile(r"^([0-9]{2})([0-9]{2})([A-Z]{2})$"), # 99-99-AA
"sidecode3": re.compile(r"^([0-9]{2})([A-Z]{2})([0-9]{2})$"), # 99-AA-99
"sidecode4": re.compile(r"^([A-Z]{2})([0-9]{2})([A-Z]{2})$"), # AA-99-AA
"sidecode5": re.compile(r"^([A-Z]{2})([A-Z]{2})([0-9]{2})$"), # AA-AA-99
"sidecode6": re.compile(r"^([0-9]{2})([A-Z]{2})([A-Z]{2})$"), # 99-AA-AA
"sidecode7": re.compile(r"^([0-9]{2})([A-Z]{3})([0-9]{1})$"), # 99-AAA-9
"sidecode8": re.compile(r"^([0-9]{1})([A-Z]{3})([0-9]{2})$"), # 9-AAA-99
"sidecode9": re.compile(r"^([A-Z]{2})([0-9]{3})([A-Z]{1})$"), # AA-999-A
"sidecode10": re.compile(r"^([A-Z]{1})([0-9]{3})([A-Z]{2})$"), # A-999-AA
"sidecode11": re.compile(r"^([A-Z]{3})([0-9]{2})([A-Z]{1})$"), # AAA-99-A
"sidecode12": re.compile(r"^([A-Z]{1})([0-9]{2})([A-Z]{3})$"), # A-99-AAA
"sidecode13": re.compile(r"^([0-9]{1})([A-Z]{2})([0-9]{3})$"), # 9-AA-999
"sidecode14": re.compile(r"^([0-9]{3})([A-Z]{2})([0-9]{1})$"), # 999-AA-9
"sidecode_koninklijk_huis": re.compile(r"^(AA)([0-9]{2,3})(([0-9]{2})?)$"), # AA-99(-99)?
"sidecode_internationaal_gerechtshof": re.compile(r"^(CDJ)([0-9]{3})$"), # CDJ-999
"sidecode_bijzondere_toelating": re.compile(r"^(ZZ)([0-9]{2})([0-9]{2})$"), # ZZ-99-99
"sidecode_tijdelijk_voor_een_dag": re.compile(r"^(F)([0-9]{2})([0-9]{2})$"), # F-99-99
"sidecode_voertuig_binnen_of_buiten_nederland_brengen": re.compile(r"^(Z)([0-9]{2})([0-9]{2})$"), # Z-99-99
}
def __init__(self, **kwargs):
kwargs['max_length'] = 8
super().__init__(**kwargs)
def clean(self, value):
value = super().clean(value)
if value:
value = value.upper().replace('-', '')
for sidecode, regex in self.SANITIZE_REGEXS.items():
match = regex.match(value)
if match:
return '-'.join(match.groups())
return value
| bsd-3-clause | c93f9a3c36bf6acbcab4f599d1163b4e | 36.350515 | 116 | 0.538504 | 2.673801 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/adverseevent.py | 1 | 10267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/AdverseEvent) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class AdverseEvent(domainresource.DomainResource):
""" Medical care, research study or other healthcare event causing physical
injury.
Actual or potential/avoided event causing unintended physical injury
resulting from or contributed to by medical care, a research study or other
healthcare setting factors that requires additional monitoring, treatment,
or hospitalization, or that results in death.
"""
resource_type = "AdverseEvent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.actuality = None
""" actual | potential.
Type `str`. """
self.category = None
""" product-problem | product-quality | product-use-error | wrong-dose
| incorrect-prescribing-information | wrong-technique | wrong-
route-of-administration | wrong-rate | wrong-duration | wrong-time
| expired-drug | medical-device-use-error | problem-different-
manufacturer | unsafe-physical-environment.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.contributor = None
""" Who was involved in the adverse event or the potential adverse
event.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.date = None
""" When the event occurred.
Type `FHIRDate` (represented as `str` in JSON). """
self.detected = None
""" When the event was detected.
Type `FHIRDate` (represented as `str` in JSON). """
self.encounter = None
""" Encounter created as part of.
Type `FHIRReference` (represented as `dict` in JSON). """
self.event = None
""" Type of the event itself in relation to the subject.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.identifier = None
""" Business identifier for the event.
Type `Identifier` (represented as `dict` in JSON). """
self.location = None
""" Location where adverse event occurred.
Type `FHIRReference` (represented as `dict` in JSON). """
self.outcome = None
""" resolved | recovering | ongoing | resolvedWithSequelae | fatal |
unknown.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.recordedDate = None
""" When the event was recorded.
Type `FHIRDate` (represented as `str` in JSON). """
self.recorder = None
""" Who recorded the adverse event.
Type `FHIRReference` (represented as `dict` in JSON). """
self.referenceDocument = None
""" AdverseEvent.referenceDocument.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.resultingCondition = None
""" Effect on the subject due to this event.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.seriousness = None
""" Seriousness of the event.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.severity = None
""" mild | moderate | severe.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.study = None
""" AdverseEvent.study.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.subject = None
""" Subject impacted by event.
Type `FHIRReference` (represented as `dict` in JSON). """
self.subjectMedicalHistory = None
""" AdverseEvent.subjectMedicalHistory.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.suspectEntity = None
""" The suspected agent causing the adverse event.
List of `AdverseEventSuspectEntity` items (represented as `dict` in JSON). """
super(AdverseEvent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(AdverseEvent, self).elementProperties()
js.extend([
("actuality", "actuality", str, False, None, True),
("category", "category", codeableconcept.CodeableConcept, True, None, False),
("contributor", "contributor", fhirreference.FHIRReference, True, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("detected", "detected", fhirdate.FHIRDate, False, None, False),
("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
("event", "event", codeableconcept.CodeableConcept, False, None, False),
("identifier", "identifier", identifier.Identifier, False, None, False),
("location", "location", fhirreference.FHIRReference, False, None, False),
("outcome", "outcome", codeableconcept.CodeableConcept, False, None, False),
("recordedDate", "recordedDate", fhirdate.FHIRDate, False, None, False),
("recorder", "recorder", fhirreference.FHIRReference, False, None, False),
("referenceDocument", "referenceDocument", fhirreference.FHIRReference, True, None, False),
("resultingCondition", "resultingCondition", fhirreference.FHIRReference, True, None, False),
("seriousness", "seriousness", codeableconcept.CodeableConcept, False, None, False),
("severity", "severity", codeableconcept.CodeableConcept, False, None, False),
("study", "study", fhirreference.FHIRReference, True, None, False),
("subject", "subject", fhirreference.FHIRReference, False, None, True),
("subjectMedicalHistory", "subjectMedicalHistory", fhirreference.FHIRReference, True, None, False),
("suspectEntity", "suspectEntity", AdverseEventSuspectEntity, True, None, False),
])
return js
from . import backboneelement
class AdverseEventSuspectEntity(backboneelement.BackboneElement):
""" The suspected agent causing the adverse event.
Describes the entity that is suspected to have caused the adverse event.
"""
resource_type = "AdverseEventSuspectEntity"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.causality = None
""" Information on the possible cause of the event.
List of `AdverseEventSuspectEntityCausality` items (represented as `dict` in JSON). """
self.instance = None
""" Refers to the specific entity that caused the adverse event.
Type `FHIRReference` (represented as `dict` in JSON). """
super(AdverseEventSuspectEntity, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(AdverseEventSuspectEntity, self).elementProperties()
js.extend([
("causality", "causality", AdverseEventSuspectEntityCausality, True, None, False),
("instance", "instance", fhirreference.FHIRReference, False, None, True),
])
return js
class AdverseEventSuspectEntityCausality(backboneelement.BackboneElement):
""" Information on the possible cause of the event.
"""
resource_type = "AdverseEventSuspectEntityCausality"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.assessment = None
""" Assessment of if the entity caused the event.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.author = None
""" AdverseEvent.suspectEntity.causalityAuthor.
Type `FHIRReference` (represented as `dict` in JSON). """
self.method = None
""" ProbabilityScale | Bayesian | Checklist.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.productRelatedness = None
""" AdverseEvent.suspectEntity.causalityProductRelatedness.
Type `str`. """
super(AdverseEventSuspectEntityCausality, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(AdverseEventSuspectEntityCausality, self).elementProperties()
js.extend([
("assessment", "assessment", codeableconcept.CodeableConcept, False, None, False),
("author", "author", fhirreference.FHIRReference, False, None, False),
("method", "method", codeableconcept.CodeableConcept, False, None, False),
("productRelatedness", "productRelatedness", str, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
| bsd-3-clause | 48d6f1bdace09386971dd875e62d9638 | 41.60166 | 111 | 0.631441 | 4.221628 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/binary_tests.py | 1 | 1330 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import binary
from .fhirdate import FHIRDate
class BinaryTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Binary", js["resourceType"])
return binary.Binary(js)
def testBinary1(self):
inst = self.instantiate_from("binary-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Binary instance")
self.implBinary1(inst)
js = inst.as_json()
self.assertEqual("Binary", js["resourceType"])
inst2 = binary.Binary(js)
self.implBinary1(inst2)
def implBinary1(self, inst):
self.assertEqual(inst.contentType, "application/pdf")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
| bsd-3-clause | d79031b8de342c560f6a5d55722a98e5 | 32.25 | 103 | 0.65188 | 3.436693 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/0ffdadea0b92_adding_measurement.py | 1 | 2991 | """Adding measurement
Revision ID: 0ffdadea0b92
Revises: ffcd82a35890
Create Date: 2017-08-30 17:33:54.104062
"""
import model.utils
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "0ffdadea0b92"
down_revision = "ffcd82a35890"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"measurement",
sa.Column("measurement_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("physical_measurements_id", sa.Integer(), nullable=False),
sa.Column("code_system", sa.String(length=255), nullable=False),
sa.Column("code_value", sa.String(length=255), nullable=False),
sa.Column("measurement_time", model.utils.UTCDateTime(), nullable=False),
sa.Column("body_site_code_system", sa.String(length=255), nullable=True),
sa.Column("body_site_code_value", sa.String(length=255), nullable=True),
sa.Column("value_string", sa.String(length=1024), nullable=True),
sa.Column("value_decimal", sa.Float(), nullable=True),
sa.Column("value_unit", sa.String(length=255), nullable=True),
sa.Column("value_code_system", sa.String(length=255), nullable=True),
sa.Column("value_code_value", sa.String(length=255), nullable=True),
sa.Column("value_datetime", model.utils.UTCDateTime(), nullable=True),
sa.Column("parent_id", sa.BIGINT(), nullable=True),
sa.Column("qualifier_id", sa.BIGINT(), nullable=True),
sa.ForeignKeyConstraint(["parent_id"], ["measurement.measurement_id"]),
sa.ForeignKeyConstraint(["physical_measurements_id"], ["physical_measurements.physical_measurements_id"]),
sa.ForeignKeyConstraint(["qualifier_id"], ["measurement.measurement_id"]),
sa.PrimaryKeyConstraint("measurement_id"),
)
op.create_table(
"measurement_to_qualifier",
sa.Column("measurement_id", sa.BIGINT(), nullable=False),
sa.Column("qualifier_id", sa.BIGINT(), nullable=False),
sa.ForeignKeyConstraint(["measurement_id"], ["measurement.measurement_id"]),
sa.ForeignKeyConstraint(["qualifier_id"], ["measurement.measurement_id"]),
sa.PrimaryKeyConstraint("measurement_id", "qualifier_id"),
)
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("measurement_to_qualifier")
op.drop_table("measurement")
# ### end Alembic commands ###
| bsd-3-clause | 6d766debdb3b5b7b1d855912986ddc26 | 37.346154 | 114 | 0.662989 | 3.767003 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/825eefb014f9_adding_w5nf_analysis_attributes.py | 1 | 4265 | """adding_w5nf_analysis_attributes
Revision ID: 825eefb014f9
Revises: 6cb9405f1549, 57515daf8448, 33b34f5ae271
Create Date: 2022-03-30 15:51:16.904983
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '825eefb014f9'
down_revision = ('6cb9405f1549', '57515daf8448', '33b34f5ae271')
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('genomic_w5nf_raw',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('file_path', sa.String(length=255), nullable=True),
sa.Column('ignore_flag', sa.SmallInteger(), nullable=False),
sa.Column('dev_note', sa.String(length=255), nullable=True),
sa.Column('biobank_id', sa.String(length=255), nullable=True),
sa.Column('sample_id', sa.String(length=255), nullable=True),
sa.Column('request_reason', sa.String(length=255), nullable=True),
sa.Column('request_reason_free', sa.String(length=512), nullable=True),
sa.Column('health_related_data_file_name', sa.String(length=255), nullable=True),
sa.Column('clinical_analysis_type', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_genomic_w5nf_raw_file_path'), 'genomic_w5nf_raw', ['file_path'], unique=False)
op.add_column('genomic_cvl_analysis', sa.Column('failed', sa.Integer(), nullable=False))
op.add_column('genomic_cvl_analysis', sa.Column('failed_request_reason', sa.String(length=255), nullable=True))
op.add_column('genomic_cvl_analysis', sa.Column('failed_request_reason_free', sa.String(length=512), nullable=True))
op.add_column('genomic_set_member', sa.Column('cvl_w5nf_hdr_manifest_job_run_id', sa.Integer(), nullable=True))
op.add_column('genomic_set_member', sa.Column('cvl_w5nf_pgx_manifest_job_run_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['cvl_w5nf_pgx_manifest_job_run_id'], ['id'])
op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['cvl_w5nf_hdr_manifest_job_run_id'], ['id'])
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
op.drop_column('genomic_set_member', 'cvl_w5nf_pgx_manifest_job_run_id')
op.drop_column('genomic_set_member', 'cvl_w5nf_hdr_manifest_job_run_id')
op.drop_column('genomic_cvl_analysis', 'failed_request_reason_free')
op.drop_column('genomic_cvl_analysis', 'failed_request_reason')
op.drop_column('genomic_cvl_analysis', 'failed')
op.drop_index(op.f('ix_genomic_w5nf_raw_file_path'), table_name='genomic_w5nf_raw')
op.drop_table('genomic_w5nf_raw')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| bsd-3-clause | 78c3964e175fa660375ab0899509f1cc | 46.921348 | 125 | 0.721454 | 3.104076 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/operationdefinition_tests.py | 1 | 4857 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import operationdefinition
from .fhirdate import FHIRDate
class OperationDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("OperationDefinition", js["resourceType"])
return operationdefinition.OperationDefinition(js)
def testOperationDefinition1(self):
inst = self.instantiate_from("operationdefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a OperationDefinition instance")
self.implOperationDefinition1(inst)
js = inst.as_json()
self.assertEqual("OperationDefinition", js["resourceType"])
inst2 = operationdefinition.OperationDefinition(js)
self.implOperationDefinition1(inst2)
def implOperationDefinition1(self, inst):
self.assertEqual(inst.base, "OperationDefinition/Questionnaire-populate")
self.assertEqual(inst.code, "populate")
self.assertEqual(inst.comment, "Only implemented for Labs and Medications so far")
self.assertEqual(inst.contact[0].name, "System Administrator")
self.assertEqual(inst.contact[0].telecom[0].system, "email")
self.assertEqual(inst.contact[0].telecom[0].value, "beep@coyote.acme.com")
self.assertEqual(inst.date.date, FHIRDate("2015-08-04").date)
self.assertEqual(inst.date.as_json(), "2015-08-04")
self.assertEqual(inst.description, "Limited implementation of the Populate Questionnaire implementation")
self.assertEqual(inst.id, "example")
self.assertTrue(inst.instance)
self.assertEqual(inst.jurisdiction[0].coding[0].code, "GB")
self.assertEqual(inst.jurisdiction[0].coding[0].display, "United Kingdom of Great Britain and Northern Ireland (the)")
self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
self.assertEqual(inst.kind, "operation")
self.assertEqual(inst.name, "Populate Questionnaire")
self.assertEqual(inst.overload[0].parameterName[0], "subject")
self.assertEqual(inst.overload[0].parameterName[1], "local")
self.assertEqual(inst.overload[1].comment, "local defaults to false when not passed as a parameter")
self.assertEqual(inst.overload[1].parameterName[0], "subject")
self.assertEqual(inst.parameter[0].max, "1")
self.assertEqual(inst.parameter[0].min, 1)
self.assertEqual(inst.parameter[0].name, "subject")
self.assertEqual(inst.parameter[0].type, "Reference")
self.assertEqual(inst.parameter[0].use, "in")
self.assertEqual(inst.parameter[1].documentation, "If the *local* parameter is set to true, server information about the specified subject will be used to populate the instance.")
self.assertEqual(inst.parameter[1].max, "1")
self.assertEqual(inst.parameter[1].min, 0)
self.assertEqual(inst.parameter[1].name, "local")
self.assertEqual(inst.parameter[1].type, "Reference")
self.assertEqual(inst.parameter[1].use, "in")
self.assertEqual(inst.parameter[2].documentation, "The partially (or fully)-populated set of answers for the specified Questionnaire")
self.assertEqual(inst.parameter[2].max, "1")
self.assertEqual(inst.parameter[2].min, 1)
self.assertEqual(inst.parameter[2].name, "return")
self.assertEqual(inst.parameter[2].type, "QuestionnaireResponse")
self.assertEqual(inst.parameter[2].use, "out")
self.assertEqual(inst.publisher, "Acme Healthcare Services")
self.assertEqual(inst.resource[0], "Questionnaire")
self.assertEqual(inst.status, "draft")
self.assertFalse(inst.system)
self.assertEqual(inst.text.status, "generated")
self.assertFalse(inst.type)
self.assertEqual(inst.url, "http://h7.org/fhir/OperationDefinition/example")
self.assertEqual(inst.useContext[0].code.code, "venue")
self.assertEqual(inst.useContext[0].code.display, "Clinical Venue")
self.assertEqual(inst.useContext[0].code.system, "http://build.fhir.org/codesystem-usage-context-type")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].code, "IMP")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].display, "inpatient encounter")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.version, "B")
| bsd-3-clause | 276e7c08587cccf14dff97d4d24646a2 | 55.476744 | 187 | 0.698991 | 3.690729 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/imagingobjectselection_tests.py | 1 | 3701 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import imagingobjectselection
from .fhirdate import FHIRDate
class ImagingObjectSelectionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ImagingObjectSelection", js["resourceType"])
return imagingobjectselection.ImagingObjectSelection(js)
def testImagingObjectSelection1(self):
inst = self.instantiate_from("imagingobjectselection-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ImagingObjectSelection instance")
self.implImagingObjectSelection1(inst)
js = inst.as_json()
self.assertEqual("ImagingObjectSelection", js["resourceType"])
inst2 = imagingobjectselection.ImagingObjectSelection(js)
self.implImagingObjectSelection1(inst2)
def implImagingObjectSelection1(self, inst):
self.assertEqual(inst.authoringTime.date, FHIRDate("2014-11-20T11:01:20-08:00").date)
self.assertEqual(inst.authoringTime.as_json(), "2014-11-20T11:01:20-08:00")
self.assertEqual(inst.description, "1 SC image (screen snapshot) and 2 CT images to share a chest CT exam")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.study[0].series[0].instance[0].sopClass, "urn:oid:1.2.840.10008.5.1.4.1.1.7")
self.assertEqual(inst.study[0].series[0].instance[0].uid, "urn:oid:2.16.124.113543.6003.189642796.63084.16748.2599092902")
self.assertEqual(inst.study[0].series[0].instance[0].url, "http://localhost/wado/SCP/2.16.124.113543.6003.189642796.63084.16749.2599092904")
self.assertEqual(inst.study[0].series[0].uid, "urn:oid:2.16.124.113543.6003.189642796.63084.16750.2599092901")
self.assertEqual(inst.study[0].series[1].instance[0].sopClass, "urn:oid:1.2.840.10008.5.1.4.1.1.2")
self.assertEqual(inst.study[0].series[1].instance[0].uid, "urn:oid:2.16.124.113543.6003.189642796.63084.16748.2599092903")
self.assertEqual(inst.study[0].series[1].instance[0].url, "http://localhost/wado/SCP/2.16.124.113543.6003.189642796.63084.16748.2599092903")
self.assertEqual(inst.study[0].series[1].instance[1].sopClass, "urn:oid:1.2.840.10008.5.1.4.1.1.2")
self.assertEqual(inst.study[0].series[1].instance[1].uid, "urn:oid:2.16.124.113543.6003.189642796.63084.16748.2599092904")
self.assertEqual(inst.study[0].series[1].instance[1].url, "http://localhost/wado/SCP/2.16.124.113543.6003.189642796.63084.16750.2599092902")
self.assertEqual(inst.study[0].series[1].uid, "urn:oid:2.16.124.113543.6003.189642796.63084.16750.2599092902")
self.assertEqual(inst.study[0].uid, "urn:oid:2.16.124.113543.6003.189642796.63084.16749.2599092904")
self.assertEqual(inst.text.div, "<div>A set of images accompanying to an exam document, including one SC image and two CT images, to publish the exam sharing</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title.coding[0].code, "113030")
self.assertEqual(inst.title.coding[0].display, "Manifest")
self.assertEqual(inst.title.coding[0].system, "http://nema.org/dicom/dicm")
self.assertEqual(inst.title.text, "A set of objects that have been exported for sharing")
self.assertEqual(inst.uid, "urn:oid:2.16.124.113543.6003.189642796.63084.16748.2599092901")
| bsd-3-clause | 2354e2d230d7bbf952e55ba2fb5af5d3 | 61.728814 | 173 | 0.707647 | 2.87568 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/processresponse.py | 1 | 5987 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ProcessResponse) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class ProcessResponse(domainresource.DomainResource):
""" ProcessResponse resource.
This resource provides processing status, errors and notes from the
processing of a resource.
"""
resource_type = "ProcessResponse"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.communicationRequest = None
""" Request for additional information.
List of `FHIRReference` items referencing `CommunicationRequest` (represented as `dict` in JSON). """
self.created = None
""" Creation date.
Type `FHIRDate` (represented as `str` in JSON). """
self.disposition = None
""" Disposition Message.
Type `str`. """
self.error = None
""" Error code.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.form = None
""" Printed Form Identifier.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.identifier = None
""" Business Identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.organization = None
""" Authoring Organization.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.outcome = None
""" Processing outcome.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.processNote = None
""" Processing comments or additional requirements.
List of `ProcessResponseProcessNote` items (represented as `dict` in JSON). """
self.request = None
""" Request reference.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
self.requestOrganization = None
""" Responsible organization.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.requestProvider = None
""" Responsible Practitioner.
Type `FHIRReference` referencing `Practitioner` (represented as `dict` in JSON). """
self.status = None
""" active | cancelled | draft | entered-in-error.
Type `str`. """
super(ProcessResponse, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ProcessResponse, self).elementProperties()
js.extend([
("communicationRequest", "communicationRequest", fhirreference.FHIRReference, True, None, False),
("created", "created", fhirdate.FHIRDate, False, None, False),
("disposition", "disposition", str, False, None, False),
("error", "error", codeableconcept.CodeableConcept, True, None, False),
("form", "form", codeableconcept.CodeableConcept, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("organization", "organization", fhirreference.FHIRReference, False, None, False),
("outcome", "outcome", codeableconcept.CodeableConcept, False, None, False),
("processNote", "processNote", ProcessResponseProcessNote, True, None, False),
("request", "request", fhirreference.FHIRReference, False, None, False),
("requestOrganization", "requestOrganization", fhirreference.FHIRReference, False, None, False),
("requestProvider", "requestProvider", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, False),
])
return js
from . import backboneelement
class ProcessResponseProcessNote(backboneelement.BackboneElement):
""" Processing comments or additional requirements.
Suite of processing notes or additional requirements if the processing has
been held.
"""
resource_type = "ProcessResponseProcessNote"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.text = None
""" Comment on the processing.
Type `str`. """
self.type = None
""" display | print | printoper.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(ProcessResponseProcessNote, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ProcessResponseProcessNote, self).elementProperties()
js.extend([
("text", "text", str, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
| bsd-3-clause | d38fc170dd46ab1e57a216edad31d057 | 37.625806 | 109 | 0.625522 | 4.525321 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/substance.py | 1 | 6510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Substance) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class Substance(domainresource.DomainResource):
""" A homogeneous material with a definite composition.
"""
resource_type = "Substance"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.category = None
""" What class/type of substance this is.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.code = None
""" What substance this is.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.description = None
""" Textual description of the substance, comments.
Type `str`. """
self.identifier = None
""" Unique identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.ingredient = None
""" Composition information about the substance.
List of `SubstanceIngredient` items (represented as `dict` in JSON). """
self.instance = None
""" If this describes a specific package/container of the substance.
List of `SubstanceInstance` items (represented as `dict` in JSON). """
self.status = None
""" active | inactive | entered-in-error.
Type `str`. """
super(Substance, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Substance, self).elementProperties()
js.extend([
("category", "category", codeableconcept.CodeableConcept, True, None, False),
("code", "code", codeableconcept.CodeableConcept, False, None, True),
("description", "description", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("ingredient", "ingredient", SubstanceIngredient, True, None, False),
("instance", "instance", SubstanceInstance, True, None, False),
("status", "status", str, False, None, False),
])
return js
from . import backboneelement
class SubstanceIngredient(backboneelement.BackboneElement):
""" Composition information about the substance.
A substance can be composed of other substances.
"""
resource_type = "SubstanceIngredient"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.quantity = None
""" Optional amount (concentration).
Type `Ratio` (represented as `dict` in JSON). """
self.substanceCodeableConcept = None
""" A component of the substance.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.substanceReference = None
""" A component of the substance.
Type `FHIRReference` referencing `Substance` (represented as `dict` in JSON). """
super(SubstanceIngredient, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SubstanceIngredient, self).elementProperties()
js.extend([
("quantity", "quantity", ratio.Ratio, False, None, False),
("substanceCodeableConcept", "substanceCodeableConcept", codeableconcept.CodeableConcept, False, "substance", True),
("substanceReference", "substanceReference", fhirreference.FHIRReference, False, "substance", True),
])
return js
class SubstanceInstance(backboneelement.BackboneElement):
""" If this describes a specific package/container of the substance.
Substance may be used to describe a kind of substance, or a specific
package/container of the substance: an instance.
"""
resource_type = "SubstanceInstance"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.expiry = None
""" When no longer valid to use.
Type `FHIRDate` (represented as `str` in JSON). """
self.identifier = None
""" Identifier of the package/container.
Type `Identifier` (represented as `dict` in JSON). """
self.quantity = None
""" Amount of substance in the package.
Type `Quantity` (represented as `dict` in JSON). """
super(SubstanceInstance, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SubstanceInstance, self).elementProperties()
js.extend([
("expiry", "expiry", fhirdate.FHIRDate, False, None, False),
("identifier", "identifier", identifier.Identifier, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + '.ratio']
| bsd-3-clause | 81328c1e986250d1fe134abba676ccaf | 36.2 | 128 | 0.626882 | 4.389751 | false | false | false | false |
django/django-localflavor | localflavor/de/forms.py | 2 | 3538 | """DE-specific Form helpers."""
import re
from django.core.exceptions import ImproperlyConfigured
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.translation import gettext_lazy as _
from .de_states import STATE_CHOICES
ID_RE = re.compile(r"^(?P<residence>\d{10})(?P<origin>\w{1,3})"
r"[-\ ]?(?P<birthday>\d{7})[-\ ]?(?P<validity>\d{7})"
r"[-\ ]?(?P<checksum>\d{1})$")
class DEZipCodeField(RegexField):
"""A form field that validates input as a German zip code.
Valid zip codes consist of five digits.
"""
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX.'),
}
def __init__(self, **kwargs):
super().__init__(r'^([0]{1}[1-9]{1}|[1-9]{1}[0-9]{1})[0-9]{3}$', **kwargs)
class DEStateSelect(Select):
"""A Select widget that uses a list of DE states as its choices."""
def __init__(self, attrs=None):
super().__init__(attrs, choices=STATE_CHOICES)
class DEIdentityCardNumberField(CharField):
"""A German identity card number.
Checks the following rules to determine whether the number is valid:
* Conforms to the XXXXXXXXXXX-XXXXXXX-XXXXXXX-X format.
* No group consists entirely of zeroes.
* Included checksums match calculated checksums
Algorithm is documented at http://de.wikipedia.org/wiki/Personalausweis
"""
default_error_messages = {
'invalid': _('Enter a valid German identity card number in '
'XXXXXXXXXXX-XXXXXXX-XXXXXXX-X format.'),
}
def has_valid_checksum(self, number):
given_number, given_checksum = number[:-1], number[-1]
calculated_checksum = 0
parameter = 7
for item in given_number:
fragment = str(int(item) * parameter)
if fragment.isalnum():
calculated_checksum += int(fragment[-1])
if parameter == 1:
parameter = 7
elif parameter == 3:
parameter = 1
elif parameter == 7:
parameter = 3
return str(calculated_checksum)[-1] == given_checksum
def clean(self, value):
value = super().clean(value)
if value in self.empty_values:
return value
match = re.match(ID_RE, value)
if not match:
raise ValidationError(self.error_messages['invalid'], code='invalid')
id_parts = match.groupdict()
residence = id_parts['residence']
origin = id_parts['origin']
birthday = id_parts['birthday']
validity = id_parts['validity']
checksum = id_parts['checksum']
if (residence == '0000000000' or
birthday == '0000000' or
validity == '0000000'):
raise ValidationError(self.error_messages['invalid'], code='invalid')
all_digits = "%s%s%s%s" % (residence, birthday, validity, checksum)
if (not self.has_valid_checksum(residence) or
not self.has_valid_checksum(birthday) or
not self.has_valid_checksum(validity) or
not self.has_valid_checksum(all_digits)):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return '%s%s-%s-%s-%s' % (residence,
origin,
birthday,
validity,
checksum)
| bsd-3-clause | 9e7766db5c55aaff6c11c6aba75d3ebf | 33.019231 | 82 | 0.573488 | 4.142857 | false | false | false | false |
from copy import deepcopy
from datetime import date, datetime, timedelta
import pytz
from rdr_service import config
from rdr_service.api_util import HEALTHPRO, PTC
from rdr_service.model.api_user import ApiUser
from rdr_service.model.deceased_report import DeceasedReport
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.participant_enums import DeceasedNotification, DeceasedReportDenialReason, DeceasedReportStatus,\
DeceasedStatus, SuspensionStatus, WithdrawalStatus
from tests.helpers.unittest_base import BaseTestCase
class DeceasedReportTestBase(BaseTestCase):
    """Shared helpers for the deceased-report API test cases."""

    def overwrite_test_user_roles(self, roles):
        """Temporarily replace the roles granted to the example test API user."""
        user_info = config.getSettingJson(config.USER_INFO)
        updated_info = deepcopy(user_info)
        updated_info['example@example.com']['roles'] = roles
        self.temporarily_override_config_setting(config.USER_INFO, updated_info)

    @staticmethod
    def get_deceased_report_id(response):
        """Pull the integer report id out of a deceased-report API response payload."""
        first_identifier = response['identifier'][0]
        return int(first_identifier['value'])
class DeceasedReportApiTest(DeceasedReportTestBase):
def setUp(self):
    """Create the fixture participants shared by the deceased-report API tests."""
    super(DeceasedReportApiTest, self).setUp()

    test_hpo = self.data_generator.create_database_hpo()

    # An HPO-paired participant that has no participant-summary record.
    self.paired_participant_without_summary = self.data_generator.create_database_participant(hpoId=test_hpo.hpoId)

    # An HPO-paired participant that does have a summary record.
    self.paired_participant_with_summary = self.data_generator.create_database_participant(hpoId=test_hpo.hpoId)
    self.data_generator.create_database_participant_summary(participant=self.paired_participant_with_summary)

    # An unpaired participant with a summary record.
    self.unpaired_participant_with_summary = self.data_generator.create_database_participant()
    self.data_generator.create_database_participant_summary(participant=self.unpaired_participant_with_summary)
def post_report(self, report_json, participant_id=None, expected_status=200):
    """POST a deceased-report Observation for a participant.

    Defaults to the paired participant that has no summary record when no
    participant id is given.
    """
    target_id = (self.paired_participant_without_summary.participantId
                 if participant_id is None else participant_id)
    url = f'Participant/P{target_id}/Observation'
    return self.send_post(url, request_data=report_json, expected_status=expected_status)
def post_report_review(self, review_json, report_id, participant_id, expected_status=200):
    """POST a review (approval/denial) for an existing deceased report."""
    url = f'Participant/P{participant_id}/Observation/{report_id}/Review'
    return self.send_post(url, request_data=review_json, expected_status=expected_status)
def get_report_from_db(self, report_id):
    """Load a deceased report fresh from the database.

    The report may already be cached in the session, so flush and reset the
    session first to be sure the latest committed data comes back.
    """
    self.session.commit()
    self.session.close()
    return self.session.query(DeceasedReport).filter_by(id=report_id).one()
def get_participant_summary_from_db(self, participant_id):
    """Load a participant summary fresh from the database.

    The summary already lives in the session, so the session is reset first to
    force a new query for the latest committed values.
    """
    self.session.commit()
    self.session.close()
    return self.session.query(ParticipantSummary).filter_by(participantId=participant_id).one()
@staticmethod
def build_deceased_report_json(status='preliminary', date_of_death='2020-01-01',
                               notification=DeceasedNotification.EHR, notification_other=None, user_system='system',
                               user_name='name', authored='2020-01-01T00:00:00Z', reporter_name='Jane Doe',
                               reporter_relation='SPOUSE', reporter_phone=None,
                               reporter_email=None, cause_of_death='Heart disease'):
    """Build a FHIR Observation payload describing a deceased report.

    Reporter (related-person) details are only attached when the notification
    came through a channel other than EHR or OTHER.
    """
    encounter = {'reference': str(notification)}
    if notification == DeceasedNotification.OTHER:
        # OTHER carries a free-text description of the notification channel.
        encounter['display'] = notification_other

    payload = {
        'code': {
            'text': 'DeceasedReport'
        },
        'status': status,
        'effectiveDateTime': date_of_death,
        'performer': [{
            'type': user_system,
            'reference': user_name
        }],
        'valueString': cause_of_death,
        'issued': authored,
        'encounter': encounter
    }

    if notification not in (DeceasedNotification.EHR, DeceasedNotification.OTHER):
        # A person reported the death, so describe them in a reporter extension.
        reporter_extensions = [{
            'url': 'http://hl7.org/fhir/ValueSet/relatedperson-relationshiptype',
            'valueCode': reporter_relation
        }]
        if reporter_email:
            reporter_extensions.append({
                'url': 'https://www.pmi-ops.org/email-address',
                'valueString': reporter_email
            })
        if reporter_phone:
            reporter_extensions.append({
                'url': 'https://www.pmi-ops.org/phone-number',
                'valueString': reporter_phone
            })
        payload['extension'] = [{
            'url': 'https://www.pmi-ops.org/deceased-reporter',
            'valueHumanName': {
                'text': reporter_name,
                'extension': reporter_extensions
            }
        }]

    return payload
@staticmethod
def build_report_review_json(user_system='system', user_name='name', authored='2020-01-01T00:00:00Z',
status='final', denial_reason=DeceasedReportDenialReason.MARKED_IN_ERROR,
denial_reason_other='Another reason', date_of_death='2020-01-01'):
report_json = {
'code': {
'text': 'DeceasedReport'
},
'status': status,
'effectiveDateTime': date_of_death,
'performer': [{
'type': user_system,
'reference': user_name
}],
'issued': authored
}
if status == 'cancelled':
denial_reference = {
'reference': str(denial_reason)
}
if denial_reason == DeceasedReportDenialReason.OTHER:
denial_reference['display'] = denial_reason_other
report_json['extension'] = [{
'url': 'https://www.pmi-ops.org/observation-denial-reason',
'valueReference': denial_reference
}]
return report_json
def assertReportResponseMatches(self, expected, actual):
del actual['identifier']
del actual['subject']
del actual['resourceType']
if 'performer' in actual:
for performer_json in actual['performer']:
del performer_json['extension']
self.assertJsonResponseMatches(expected, actual, strip_tz=False)
def test_creating_minimal_deceased_report(self):
report_json = self.build_deceased_report_json(
status='preliminary',
date_of_death='2020-01-02',
notification=DeceasedNotification.EHR,
user_system='https://example.com',
user_name='me@test.com',
authored='2020-01-05T13:43:21Z',
cause_of_death='Heart disease'
)
response = self.post_report(report_json, participant_id=self.paired_participant_with_summary.participantId)
# Check data saved to the database
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.PENDING, created_report.status)
self.assertEqual(date(2020, 1, 2), created_report.dateOfDeath)
self.assertEqual(DeceasedNotification.EHR, created_report.notification)
self.assertEqual('https://example.com', created_report.author.system)
self.assertEqual('me@test.com', created_report.author.username)
self.assertEqual(datetime(2020, 1, 5, 13, 43, 21), created_report.authored)
self.assertEqual('Heart disease', created_report.causeOfDeath)
# Check participant summary data
participant_summary = self.get_participant_summary_from_db(
participant_id=self.paired_participant_with_summary.participantId
)
self.assertEqual(DeceasedStatus.PENDING, participant_summary.deceasedStatus)
self.assertEqual(datetime(2020, 1, 5, 13, 43, 21), participant_summary.deceasedAuthored)
self.assertEqual(date(2020, 1, 2), participant_summary.dateOfDeath)
# Check response for extra performer extension
performer_extension = response['performer'][0]['extension'][0]
self.assertEqual('https://www.pmi-ops.org/observation/authored', performer_extension['url'])
self.assertEqual('2020-01-05T13:43:21Z', performer_extension['valueDateTime'])
# Check that the rest of the response matches what was sent
self.assertReportResponseMatches(report_json, response)
def test_other_notification_method(self):
report_json = self.build_deceased_report_json(
notification=DeceasedNotification.OTHER,
notification_other='Another reason'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedNotification.OTHER, created_report.notification)
self.assertEqual('Another reason', created_report.notificationOther)
self.assertReportResponseMatches(report_json, response)
def test_reporter_info(self):
report_json = self.build_deceased_report_json(
notification=DeceasedNotification.NEXT_KIN_SUPPORT,
reporter_name='Jane Doe',
reporter_relation='SPOUSE',
reporter_phone='123-456-7890',
reporter_email='jdoe@me.com'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedNotification.NEXT_KIN_SUPPORT, created_report.notification)
self.assertEqual('Jane Doe', created_report.reporterName)
self.assertEqual('SPOUSE', created_report.reporterRelationship)
self.assertEqual('123-456-7890', created_report.reporterPhone)
self.assertEqual('jdoe@me.com', created_report.reporterEmail)
self.assertReportResponseMatches(report_json, response)
def test_naive_issued_timestamp(self):
report_json = self.build_deceased_report_json(
authored='2020-01-05T13:43:21'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(datetime(2020, 1, 5, 13, 43, 21), created_report.authored)
self.assertEqual('2020-01-05T13:43:21Z', response['issued'])
def test_cst_issued_timestamp(self):
report_json = self.build_deceased_report_json(
authored='2020-01-05T13:43:21-06:00'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(datetime(2020, 1, 5, 19, 43, 21), created_report.authored)
self.assertEqual('2020-01-05T19:43:21Z', response['issued'])
def test_post_with_invalid_fields(self):
# Check missing status response
report_json = self.build_deceased_report_json()
del report_json['status']
self.post_report(report_json, expected_status=400)
# Check unauthorized status when creating
report_json = self.build_deceased_report_json(status='final')
self.post_report(report_json, expected_status=400)
# Check missing code response
report_json = self.build_deceased_report_json()
del report_json['code']
self.post_report(report_json, expected_status=400)
# Check missing notification data response
report_json = self.build_deceased_report_json()
del report_json['encounter']
self.post_report(report_json, expected_status=400)
# Check missing 'other text' when notification is OTHER
report_json = self.build_deceased_report_json(notification=DeceasedNotification.OTHER)
del report_json['encounter']['display']
self.post_report(report_json, expected_status=400)
# Check for different states of missing author information
report_json = self.build_deceased_report_json()
del report_json['performer']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json()
del report_json['performer'][0]['type']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json()
del report_json['performer'][0]['reference']
self.post_report(report_json, expected_status=400)
# Check for missing authored date (referred to as 'issued' for FHIR compliance)
report_json = self.build_deceased_report_json()
del report_json['issued']
self.post_report(report_json, expected_status=400)
# Check for response when missing pieces of reporter information
report_json = self.build_deceased_report_json(notification=DeceasedNotification.NEXT_KIN_SUPPORT)
del report_json['extension']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json(notification=DeceasedNotification.NEXT_KIN_SUPPORT)
del report_json['extension'][0]['valueHumanName']['text']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json(notification=DeceasedNotification.NEXT_KIN_SUPPORT)
del report_json['extension'][0]['valueHumanName']['extension'][0] # deleting association (only required one)
self.post_report(report_json, expected_status=400)
# Try invalid status
report_json = self.build_deceased_report_json(status='unknown')
self.post_report(report_json, expected_status=400)
# Check for response when trying to use future date for authored
three_days_from_now = datetime.now() + timedelta(days=3)
report_json = self.build_deceased_report_json(authored=three_days_from_now.isoformat())
self.post_report(report_json, expected_status=400)
# Check for response when trying to use future date for date of death
three_days_from_now = date.today() + timedelta(days=3)
report_json = self.build_deceased_report_json(date_of_death=three_days_from_now.isoformat())
self.post_report(report_json, expected_status=400)
def test_post_with_only_required_fields(self):
report_json = self.build_deceased_report_json()
del report_json['effectiveDateTime']
del report_json['valueString']
response = self.post_report(report_json, participant_id=self.paired_participant_with_summary.participantId)
del response['effectiveDateTime']
self.assertReportResponseMatches(report_json, response)
participant_summary = self.get_participant_summary_from_db(
participant_id=self.paired_participant_with_summary.participantId
)
self.assertIsNone(participant_summary.dateOfDeath)
def test_other_roles_not_allowed_to_create(self):
report_json = self.build_deceased_report_json()
self.overwrite_test_user_roles(['testing'])
self.post_report(report_json, expected_status=403)
def test_health_pro_can_create(self):
report_json = self.build_deceased_report_json()
self.overwrite_test_user_roles([HEALTHPRO])
self.post_report(report_json)
def test_ptsc_can_create(self):
report_json = self.build_deceased_report_json()
self.overwrite_test_user_roles([PTC])
self.post_report(report_json)
def test_report_auto_approve(self):
# Deceased reports made for unpaired participants don't need second approval.
# So these reports should be approved upon creation.
unpaired_participant_id = self.unpaired_participant_with_summary.participantId
report_json = self.build_deceased_report_json()
response = self.post_report(report_json, participant_id=unpaired_participant_id)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status)
self.assertEqual('final', response['status'])
participant_summary = self.get_participant_summary_from_db(participant_id=unpaired_participant_id)
self.assertEqual(DeceasedStatus.APPROVED, participant_summary.deceasedStatus)
self.assertEqual(datetime(2020, 1, 1), participant_summary.deceasedAuthored)
def create_pending_deceased_report(self, participant_id=None, **kwargs):
if participant_id is None:
participant_id = self.paired_participant_without_summary.participantId
return self.data_generator.create_database_deceased_report(participantId=participant_id, **kwargs)
def test_multiple_pending_reports_not_allowed(self):
report = self.create_pending_deceased_report()
# Try creating another deceased report and check for Conflict status code
report_json = self.build_deceased_report_json()
self.post_report(report_json, participant_id=report.participantId, expected_status=409)
def test_approving_report(self):
report = self.create_pending_deceased_report(
participant_id=self.paired_participant_with_summary.participantId,
authored='2020-06-01T00:00:00Z',
)
review_json = self.build_report_review_json(
status='final',
authored='2020-07-01T00:00:00Z',
user_system='https://example.com',
user_name='reviewer@test.com'
)
review_response = self.post_report_review(review_json, report.id, report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status)
self.assertEqual(datetime(2020, 7, 1), created_report.reviewed)
self.assertEqual('https://example.com', created_report.reviewer.system)
self.assertEqual('reviewer@test.com', created_report.reviewer.username)
self.assertEqual('final', review_response['status'])
participant_summary = self.get_participant_summary_from_db(participant_id=report.participantId)
self.assertEqual(DeceasedStatus.APPROVED, participant_summary.deceasedStatus)
self.assertEqual(datetime(2020, 7, 1), participant_summary.deceasedAuthored)
# Check create/approve performer dates in response
author_extension_json = review_response['performer'][0]['extension'][0]
self.assertEqual('https://www.pmi-ops.org/observation/authored', author_extension_json['url'])
self.assertEqual('2020-06-01T00:00:00Z', author_extension_json['valueDateTime'])
reviewer_extension_json = review_response['performer'][1]['extension'][0]
self.assertEqual('https://www.pmi-ops.org/observation/reviewed', reviewer_extension_json['url'])
self.assertEqual('2020-07-01T00:00:00Z', reviewer_extension_json['valueDateTime'])
def test_approving_can_overwrite_date_of_death(self):
participant_id = self.paired_participant_with_summary.participantId
report_json = self.build_deceased_report_json(date_of_death='2020-01-01')
response = self.post_report(report_json, participant_id=participant_id)
report_id = self.get_deceased_report_id(response)
participant_summary = self.get_participant_summary_from_db(participant_id=participant_id)
self.assertEqual(date(2020, 1, 1), participant_summary.dateOfDeath)
review_json = self.build_report_review_json(
date_of_death='2019-06-01'
)
self.post_report_review(review_json, report_id, participant_id)
created_report = self.get_report_from_db(report_id)
self.assertEqual(date(2019, 6, 1), created_report.dateOfDeath)
participant_summary = self.get_participant_summary_from_db(participant_id=participant_id)
self.assertEqual(date(2019, 6, 1), participant_summary.dateOfDeath)
def test_only_healthpro_can_review(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json()
self.overwrite_test_user_roles(['testing'])
self.post_report_review(review_json, report.id, report.participantId, expected_status=403)
self.overwrite_test_user_roles([PTC])
self.post_report_review(review_json, report.id, report.participantId, expected_status=403)
self.overwrite_test_user_roles([HEALTHPRO])
self.post_report_review(review_json, report.id, report.participantId, expected_status=200)
def test_report_denial(self):
report = self.create_pending_deceased_report(
participant_id=self.paired_participant_with_summary.participantId
)
review_json = self.build_report_review_json(
status='cancelled',
denial_reason=DeceasedReportDenialReason.OTHER,
denial_reason_other='Another reason'
)
review_response = self.post_report_review(review_json, report.id, report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.DENIED, created_report.status)
self.assertEqual(DeceasedReportDenialReason.OTHER, created_report.denialReason)
self.assertEqual('Another reason', created_report.denialReasonOther)
participant_summary = self.get_participant_summary_from_db(participant_id=report.participantId)
self.assertEqual(DeceasedStatus.UNSET, participant_summary.deceasedStatus)
self.assertIsNone(participant_summary.deceasedAuthored)
self.assertIsNone(participant_summary.dateOfDeath)
# Check that the denial reason comes through on the response
self.assertEqual('cancelled', review_response['status'])
denial_extension = review_response['extension'][0]['valueReference']
self.assertEqual('OTHER', denial_extension['reference'])
self.assertEqual('Another reason', denial_extension['display'])
def test_pending_report_not_allowed_when_approved_report_exists(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json()
self.post_report_review(review_json, report.id, report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status,
"Test is built assuming an APPROVED report would be created")
# Try creating another deceased report and check for Conflict status code
report_json = self.build_deceased_report_json()
self.post_report(report_json, participant_id=report.participantId, expected_status=409)
def test_multiple_denied_reports(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json(status='cancelled')
self.post_report_review(review_json, report.id, report.participantId)
# Build another report and deny it too
report = self.create_pending_deceased_report(participant_id=report.participantId)
self.post_report_review(review_json, report.id, report.participantId)
# Try creating another deceased report, expecting it to work
report = self.create_pending_deceased_report(participant_id=report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.PENDING, created_report.status)
def test_approving_denied_report_not_allowed(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json(status='cancelled')
self.post_report_review(review_json, report.id, report.participantId)
# Try approving the denied report
review_json = self.build_report_review_json(status='final')
self.post_report_review(review_json, report.id, report.participantId, expected_status=400)
def test_denying_approved_report_not_allowed(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json(status='final')
self.post_report_review(review_json, report.id, report.participantId)
# Try approving the denied report
review_json = self.build_report_review_json(status='cancelled')
self.post_report_review(review_json, report.id, report.participantId, expected_status=400)
def test_api_users_not_duplicated(self):
report = self.create_pending_deceased_report()
created_report = self.get_report_from_db(report.id)
review_json = self.build_report_review_json(
user_system=created_report.author.system,
user_name=created_report.author.username
)
self.post_report_review(review_json, report.id, report.participantId)
self.assertEqual(1, self.session.query(ApiUser).count())
def test_participant_summary_fields_redacted(self):
"""Should still see contact information, but contact method should be updated for deceased participants"""
participant = self.data_generator.create_database_participant()
summary_obj = self.data_generator.create_database_participant_summary(
participant=participant,
phoneNumber='123-456-7890',
loginPhoneNumber='1-800-555-5555',
email='test@me.com',
streetAddress='123 Elm',
streetAddress2='Unit A',
city='Eureka',
zipCode='12345'
)
participant_id = participant.participantId
report_json = self.build_deceased_report_json(authored="2020-01-01T00:00:00Z")
response = self.post_report(report_json, participant_id=participant_id)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status,
"Test is built assuming an APPROVED report would be created")
summary_response = self.send_get(f'Participant/P{participant_id}/Summary')
for field_name, value in [
('phoneNumber', summary_obj.phoneNumber),
('loginPhoneNumber', summary_obj.loginPhoneNumber),
('email', summary_obj.email),
('streetAddress', summary_obj.streetAddress),
('streetAddress2', summary_obj.streetAddress2),
('city', summary_obj.city),
('zipCode', summary_obj.zipCode)
]:
self.assertEqual(value, summary_response[field_name])
self.assertEqual('NO_CONTACT', summary_response['recontactMethod'])
def test_participant_summary_redact_time_window(self):
# Fields should still be available for a short time window
participant = self.data_generator.create_database_participant()
self.data_generator.create_database_participant_summary(
participant=participant,
phoneNumber='123-456-7890'
)
participant_id = participant.participantId
yesterday = datetime.now() - timedelta(days=1)
report_json = self.build_deceased_report_json(authored=yesterday.isoformat())
response = self.post_report(report_json, participant_id=participant_id)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status,
"Test is built assuming an APPROVED report would be created")
summary_response = self.send_get(f'Participant/P{participant_id}/Summary')
self.assertEqual('123-456-7890', summary_response['phoneNumber'])
self.assertEqual('NO_CONTACT', summary_response['recontactMethod'])
class ParticipantDeceasedReportApiTest(DeceasedReportTestBase):
    """Tests for listing a single participant's deceased reports."""

    def test_report_list_for_participant(self):
        """Reports are returned most-recently-reviewed first."""
        participant = self.data_generator.create_database_participant()

        # Create three denied reports with out-of-order review timestamps
        for reviewed_time in [
            datetime(2020, 3, 18, tzinfo=pytz.utc),
            datetime(2020, 2, 27, tzinfo=pytz.utc),
            datetime(2020, 4, 1, tzinfo=pytz.utc)
        ]:
            self.data_generator.create_database_deceased_report(
                participantId=participant.participantId,
                status=DeceasedReportStatus.DENIED,
                reviewed=reviewed_time
            )

        report_list_response = self.send_get(f'Participant/P{participant.participantId}/DeceasedReport')

        # Expect descending order by review time, all rendered as 'cancelled'
        expected_issued_dates = [
            '2020-04-01T00:00:00Z',
            '2020-03-18T00:00:00Z',
            '2020-02-27T00:00:00Z'
        ]
        for report_json, expected_issued in zip(report_list_response, expected_issued_dates):
            self.assertEqual('cancelled', report_json['status'])
            self.assertEqual(expected_issued, report_json['issued'])
class SearchDeceasedReportApiTest(DeceasedReportTestBase):
    """Tests for the cross-participant DeceasedReports search endpoint.

    setUp creates reports for unpaired participants, participants in a 'TEST'
    organization, and participants in another organization, plus suspended and
    withdrawn participants whose reports should never appear in results.
    """

    def setUp(self):
        super(SearchDeceasedReportApiTest, self).setUp()

        # Shortening the following lines
        create_participant_func = self.data_generator.create_database_participant
        create_deceased_report_func = self.data_generator.create_database_deceased_report

        unpaired_participant_id_1 = create_participant_func().participantId
        self.unpaired_1_report_id = create_deceased_report_func(
            participantId=unpaired_participant_id_1,
            status=DeceasedReportStatus.PENDING,
            authored=datetime(2020, 4, 1)
        ).id
        unpaired_participant_id_2 = create_participant_func().participantId
        self.unpaired_2_report_id = create_deceased_report_func(
            participantId=unpaired_participant_id_2,
            status=DeceasedReportStatus.DENIED,
            reviewed=datetime(2020, 1, 5)
        ).id
        unpaired_participant_id_3 = create_participant_func().participantId
        self.unpaired_3_report_id = create_deceased_report_func(
            participantId=unpaired_participant_id_3,
            status=DeceasedReportStatus.PENDING,
            authored=datetime(2020, 2, 18)
        ).id
        # Suspended participant: report must be excluded from every search
        unpaired_suspended_participant_id = create_participant_func(
            suspensionStatus=SuspensionStatus.NO_CONTACT
        ).participantId
        create_deceased_report_func(
            participantId=unpaired_suspended_participant_id,
            status=DeceasedReportStatus.PENDING,
            authored=datetime(2020, 2, 18)
        )

        test_org = self.data_generator.create_database_organization(externalId='TEST')
        test_participant_1_id = create_participant_func(organizationId=test_org.organizationId).participantId
        self.test_1_report_id = create_deceased_report_func(
            participantId=test_participant_1_id,
            status=DeceasedReportStatus.PENDING,
            authored=datetime(2020, 12, 5)
        ).id
        test_participant_2_id = create_participant_func(organizationId=test_org.organizationId).participantId
        self.test_2_report_id = create_deceased_report_func(
            participantId=test_participant_2_id,
            status=DeceasedReportStatus.DENIED,
            authored=datetime(2018, 1, 1),  # Setting authored date in the past to check reviewed is used when ordering
            reviewed=datetime(2020, 8, 9)
        ).id
        test_participant_3_id = create_participant_func(organizationId=test_org.organizationId).participantId
        self.test_3_report_id = create_deceased_report_func(
            participantId=test_participant_3_id,
            status=DeceasedReportStatus.APPROVED,
            reviewed=datetime(2020, 2, 3)
        ).id
        # Withdrawn participant: report must be excluded from every search
        test_withdrawn_participant_id = create_participant_func(
            organizationId=test_org.organizationId,
            withdrawalStatus=WithdrawalStatus.NO_USE
        ).participantId
        create_deceased_report_func(
            participantId=test_withdrawn_participant_id,
            status=DeceasedReportStatus.PENDING,
            reviewed=datetime(2020, 2, 3)
        )

        other_org = self.data_generator.create_database_organization(externalId='')
        other_participant_1_id = create_participant_func(organizationId=other_org.organizationId).participantId
        self.other_1_report_id = create_deceased_report_func(
            participantId=other_participant_1_id,
            status=DeceasedReportStatus.DENIED,
            reviewed=datetime(2020, 5, 19)
        ).id
        other_participant_2_id = create_participant_func(organizationId=other_org.organizationId).participantId
        self.other_2_report_id = create_deceased_report_func(
            participantId=other_participant_2_id,
            status=DeceasedReportStatus.DENIED,
            reviewed=datetime(2020, 9, 5)
        ).id
        other_participant_3_id = create_participant_func(organizationId=other_org.organizationId).participantId
        self.other_3_report_id = create_deceased_report_func(
            participantId=other_participant_3_id,
            status=DeceasedReportStatus.APPROVED,
            reviewed=datetime(2020, 9, 7)
        ).id

    def assertListResponseMatches(self, expected_report_ids, actual_json):
        """Assert the response contains exactly the expected report ids, in order."""
        self.assertEqual(len(expected_report_ids), len(actual_json), "Unexpected number of reports returned")

        for index in range(len(expected_report_ids)):
            expected_id = expected_report_ids[index]
            report_json = actual_json[index]
            self.assertEqual(int(expected_id), self.get_deceased_report_id(report_json), 'Report id mismatch')

    def test_searching_api_by_status(self):
        self.assertListResponseMatches([
            self.other_2_report_id,     # Authored 09/05
            self.test_2_report_id,      # Authored 08/09
            self.other_1_report_id,     # Authored 05/19
            self.unpaired_2_report_id   # Authored 01/05
        ], self.send_get('DeceasedReports?status=cancelled'))

        # This also implicitly checks that the suspended and withdrawn participants are left out
        self.assertListResponseMatches([
            self.test_1_report_id,      # Authored 12/05
            self.unpaired_1_report_id,  # Authored 04/01
            self.unpaired_3_report_id   # Authored 02/18
        ], self.send_get('DeceasedReports?status=preliminary'))

    def test_searching_api_by_organization(self):
        # This also implicitly checks that the withdrawn participant is left out
        self.assertListResponseMatches([
            self.test_1_report_id,  # Authored 12/05
            self.test_2_report_id,  # Authored 08/09
            self.test_3_report_id   # Authored 02/03
        ], self.send_get('DeceasedReports?org_id=TEST'))

        # This also implicitly checks that the suspended participant is left out
        self.assertListResponseMatches([
            self.unpaired_1_report_id,  # Authored 04/01
            self.unpaired_3_report_id,  # Authored 02/18
            self.unpaired_2_report_id   # Authored 01/05
        ], self.send_get('DeceasedReports?org_id=UNSET'))

    def test_searching_api_by_org_and_status(self):
        self.assertListResponseMatches(
            [],
            self.send_get('DeceasedReports?org_id=OTHER&status=preliminary'))

        self.assertListResponseMatches([
            self.unpaired_1_report_id,  # Authored 04/01
            self.unpaired_3_report_id   # Authored 02/18
        ], self.send_get('DeceasedReports?org_id=UNSET&status=preliminary'))

    # BUG FIX: this method was previously also named test_searching_api_by_org_and_status,
    # which silently shadowed (and disabled) the search test above. Renamed to reflect
    # what it actually verifies: only HEALTHPRO may search deceased reports.
    def test_searching_api_role_restrictions(self):
        self.overwrite_test_user_roles(['TEST'])
        self.send_get('DeceasedReports', expected_status=403)

        self.overwrite_test_user_roles([PTC])
        self.send_get('DeceasedReports', expected_status=403)

        self.overwrite_test_user_roles([HEALTHPRO])
        self.send_get('DeceasedReports', expected_status=200)
| bsd-3-clause | d906529eb58fc5933c952d7e14ea2ca2 | 46.74677 | 120 | 0.666171 | 3.820531 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/questionnaire_tests.py | 1 | 14250 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import questionnaire
from .fhirdate import FHIRDate
class QuestionnaireTests(unittest.TestCase):
def instantiate_from(self, filename):
    """Load a Questionnaire fixture file and parse it into a resource instance."""
    datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
    fixture_path = os.path.join(datadir, filename)
    with io.open(fixture_path, 'r', encoding='utf-8') as handle:
        js = json.load(handle)
    self.assertEqual("Questionnaire", js["resourceType"])
    return questionnaire.Questionnaire(js)
def testQuestionnaire1(self):
    """Round-trip the bluebook example questionnaire through JSON serialization."""
    resource = self.instantiate_from("questionnaire-example-bluebook.json")
    self.assertIsNotNone(resource, "Must have instantiated a Questionnaire instance")
    self.implQuestionnaire1(resource)

    serialized = resource.as_json()
    self.assertEqual("Questionnaire", serialized["resourceType"])
    # Re-parse the serialized form and verify the same assertions still hold
    self.implQuestionnaire1(questionnaire.Questionnaire(serialized))
def implQuestionnaire1(self, inst):
    """Assert the expected field values of the bluebook example questionnaire.

    Generated assertions (see file header); exercised against both the
    freshly-parsed instance and its JSON round-trip.
    """
    self.assertEqual(inst.date.date, FHIRDate("2013-02-19").date)
    self.assertEqual(inst.date.as_json(), "2013-02-19")
    self.assertEqual(inst.group.group[0].group[0].question[0].linkId, "nameOfChild")
    self.assertEqual(inst.group.group[0].group[0].question[0].text, "Name of child")
    self.assertEqual(inst.group.group[0].group[0].question[1].linkId, "sex")
    self.assertEqual(inst.group.group[0].group[0].question[1].text, "Sex")
    self.assertEqual(inst.group.group[0].group[1].linkId, "neonatalInformation")
    self.assertEqual(inst.group.group[0].group[1].question[0].linkId, "birthWeight")
    self.assertEqual(inst.group.group[0].group[1].question[0].text, "Birth weight (kg)")
    self.assertEqual(inst.group.group[0].group[1].question[1].linkId, "birthLength")
    self.assertEqual(inst.group.group[0].group[1].question[1].text, "Birth length (cm)")
    self.assertEqual(inst.group.group[0].group[1].question[2].group[0].extension[0].url, "http://example.org/Profile/questionnaire#visibilityCondition")
    self.assertEqual(inst.group.group[0].group[1].question[2].group[0].extension[0].valueString, "HAS_VALUE(../choice/code) AND NEQ(../choice/code,'NO')")
    self.assertEqual(inst.group.group[0].group[1].question[2].group[0].linkId, "vitaminKgivenDoses")
    self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[0].linkId, "vitaminiKDose1")
    self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[0].text, "1st dose")
    self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[1].linkId, "vitaminiKDose2")
    self.assertEqual(inst.group.group[0].group[1].question[2].group[0].question[1].text, "2nd dose")
    self.assertEqual(inst.group.group[0].group[1].question[2].linkId, "vitaminKgiven")
    self.assertEqual(inst.group.group[0].group[1].question[2].text, "Vitamin K given")
    self.assertEqual(inst.group.group[0].group[1].question[3].group[0].question[0].linkId, "hepBgivenDate")
    self.assertEqual(inst.group.group[0].group[1].question[3].group[0].question[0].text, "Date given")
    self.assertEqual(inst.group.group[0].group[1].question[3].linkId, "hepBgiven")
    self.assertEqual(inst.group.group[0].group[1].question[3].text, "Hep B given y / n")
    self.assertEqual(inst.group.group[0].group[1].question[4].linkId, "abnormalitiesAtBirth")
    self.assertEqual(inst.group.group[0].group[1].question[4].text, "Abnormalities noted at birth")
    self.assertEqual(inst.group.group[0].group[1].title, "Neonatal Information")
    self.assertEqual(inst.group.group[0].linkId, "birthDetails")
    self.assertEqual(inst.group.group[0].title, "Birth details - To be completed by health professional")
    self.assertEqual(inst.group.linkId, "PHR")
    self.assertTrue(inst.group.required)
    self.assertEqual(inst.group.title, "NSW Government My Personal Health Record")
    self.assertEqual(inst.id, "bb")
    self.assertEqual(inst.publisher, "New South Wales Department of Health")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.subjectType[0], "Patient")
    self.assertEqual(inst.text.status, "generated")
def testQuestionnaire2(self):
inst = self.instantiate_from("questionnaire-example-f201-lifelines.json")
self.assertIsNotNone(inst, "Must have instantiated a Questionnaire instance")
self.implQuestionnaire2(inst)
js = inst.as_json()
self.assertEqual("Questionnaire", js["resourceType"])
inst2 = questionnaire.Questionnaire(js)
self.implQuestionnaire2(inst2)
    def implQuestionnaire2(self, inst):
        """Assert the expected field values of the f201-lifelines Questionnaire fixture."""
        # Publication date
        self.assertEqual(inst.date.date, FHIRDate("2010").date)
        self.assertEqual(inst.date.as_json(), "2010")
        # Root group coding
        self.assertEqual(inst.group.concept[0].code, "VL 1-1, 18-65_1.2.2")
        self.assertEqual(inst.group.concept[0].display, "Lifelines Questionnaire 1 part 1")
        self.assertEqual(inst.group.concept[0].system, "http://example.org/system/code/lifelines/nl")
        # Group 1: allergies
        self.assertEqual(inst.group.group[0].linkId, "1")
        self.assertEqual(inst.group.group[0].question[0].linkId, "1.1")
        self.assertEqual(inst.group.group[0].question[0].text, "Do you have allergies?")
        # Group 2: general questions
        self.assertEqual(inst.group.group[1].linkId, "2")
        self.assertEqual(inst.group.group[1].question[0].linkId, "2.1")
        self.assertEqual(inst.group.group[1].question[0].text, "What is your gender?")
        self.assertEqual(inst.group.group[1].question[1].linkId, "2.2")
        self.assertEqual(inst.group.group[1].question[1].text, "What is your date of birth?")
        self.assertEqual(inst.group.group[1].question[2].linkId, "2.3")
        self.assertEqual(inst.group.group[1].question[2].text, "What is your country of birth?")
        self.assertEqual(inst.group.group[1].question[3].linkId, "2.4")
        self.assertEqual(inst.group.group[1].question[3].text, "What is your marital status?")
        self.assertEqual(inst.group.group[1].text, "General questions")
        # Group 3: intoxications ("alchohol" typo is in the source fixture)
        self.assertEqual(inst.group.group[2].linkId, "3")
        self.assertEqual(inst.group.group[2].question[0].linkId, "3.1")
        self.assertEqual(inst.group.group[2].question[0].text, "Do you smoke?")
        self.assertEqual(inst.group.group[2].question[1].linkId, "3.2")
        self.assertEqual(inst.group.group[2].question[1].text, "Do you drink alchohol?")
        self.assertEqual(inst.group.group[2].title, "Intoxications")
        # Resource-level attributes
        self.assertEqual(inst.group.linkId, "root")
        self.assertTrue(inst.group.required)
        self.assertEqual(inst.id, "f201")
        self.assertEqual(inst.status, "published")
        self.assertEqual(inst.subjectType[0], "Patient")
        self.assertEqual(inst.text.status, "generated")
def testQuestionnaire3(self):
inst = self.instantiate_from("questionnaire-example-gcs.json")
self.assertIsNotNone(inst, "Must have instantiated a Questionnaire instance")
self.implQuestionnaire3(inst)
js = inst.as_json()
self.assertEqual("Questionnaire", js["resourceType"])
inst2 = questionnaire.Questionnaire(js)
self.implQuestionnaire3(inst2)
    def implQuestionnaire3(self, inst):
        """Assert the expected field values of the GCS (Glasgow Coma Score) Questionnaire fixture."""
        # Contained value-set resources for the three GCS components
        self.assertEqual(inst.contained[0].id, "motor")
        self.assertEqual(inst.contained[1].id, "verbal")
        self.assertEqual(inst.contained[2].id, "eye")
        # Publication date
        self.assertEqual(inst.date.date, FHIRDate("2015-08-03").date)
        self.assertEqual(inst.date.as_json(), "2015-08-03")
        # Root group is coded to the LOINC GCS panel
        self.assertEqual(inst.group.concept[0].code, "9269-2")
        self.assertEqual(inst.group.concept[0].system, "http://loinc.org")
        self.assertEqual(inst.group.linkId, "1")
        # Three choice questions, each coded with its LOINC component code
        self.assertEqual(inst.group.question[0].concept[0].code, "9270-0")
        self.assertEqual(inst.group.question[0].concept[0].system, "http://loinc.org")
        self.assertEqual(inst.group.question[0].linkId, "1.1")
        self.assertEqual(inst.group.question[0].type, "choice")
        self.assertEqual(inst.group.question[1].concept[0].code, "9268-4")
        self.assertEqual(inst.group.question[1].concept[0].system, "http://loinc.org")
        self.assertEqual(inst.group.question[1].linkId, "1.2")
        self.assertEqual(inst.group.question[1].type, "choice")
        self.assertEqual(inst.group.question[2].concept[0].code, "9267-6")
        self.assertEqual(inst.group.question[2].concept[0].system, "http://loinc.org")
        self.assertEqual(inst.group.question[2].linkId, "1.3")
        self.assertEqual(inst.group.question[2].type, "choice")
        # Resource-level attributes
        self.assertTrue(inst.group.required)
        self.assertEqual(inst.group.title, "Glasgow Coma Score")
        self.assertEqual(inst.id, "gcs")
        self.assertEqual(inst.publisher, "FHIR Project team")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.subjectType[0], "Patient")
        self.assertEqual(inst.text.status, "generated")
def testQuestionnaire4(self):
inst = self.instantiate_from("questionnaire-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Questionnaire instance")
self.implQuestionnaire4(inst)
js = inst.as_json()
self.assertEqual("Questionnaire", js["resourceType"])
inst2 = questionnaire.Questionnaire(js)
self.implQuestionnaire4(inst2)
    def implQuestionnaire4(self, inst):
        """Assert the expected field values of the general Questionnaire example fixture (id 3141)."""
        # Contained yes/no value set
        self.assertEqual(inst.contained[0].id, "yesno")
        # Publication date
        self.assertEqual(inst.date.date, FHIRDate("2012-01").date)
        self.assertEqual(inst.date.as_json(), "2012-01")
        # Section 1.1: comorbidity, with nested cardial sub-group
        self.assertEqual(inst.group.group[0].concept[0].code, "COMORBIDITY")
        self.assertEqual(inst.group.group[0].concept[0].system, "http://example.org/system/code/sections")
        self.assertEqual(inst.group.group[0].linkId, "1.1")
        self.assertEqual(inst.group.group[0].question[0].concept[0].code, "COMORB")
        self.assertEqual(inst.group.group[0].question[0].concept[0].system, "http://example.org/system/code/questions")
        self.assertEqual(inst.group.group[0].question[0].group[0].concept[0].code, "CARDIAL")
        self.assertEqual(inst.group.group[0].question[0].group[0].concept[0].system, "http://example.org/system/code/sections")
        self.assertEqual(inst.group.group[0].question[0].group[0].linkId, "1.1.1.1")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[0].concept[0].code, "COMORBCAR")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[0].concept[0].system, "http://example.org/system/code/questions")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[0].linkId, "1.1.1.1.1")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[0].type, "choice")
        # Angina question carries both a local code and a SNOMED CT code
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[0].code, "COMCAR00")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[0].display, "Angina Pectoris")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[0].system, "http://example.org/system/code/questions")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[1].code, "194828000")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[1].display, "Angina (disorder)")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].concept[1].system, "http://snomed.info/sct")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].linkId, "1.1.1.1.2")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[1].type, "choice")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[2].concept[0].code, "22298006")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[2].concept[0].display, "Myocardial infarction (disorder)")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[2].concept[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[2].linkId, "1.1.1.1.3")
        self.assertEqual(inst.group.group[0].question[0].group[0].question[2].type, "choice")
        # Vascular sub-group
        self.assertEqual(inst.group.group[0].question[0].group[1].concept[0].code, "VASCULAR")
        self.assertEqual(inst.group.group[0].question[0].group[1].concept[0].system, "http://example.org/system/code/sections")
        self.assertEqual(inst.group.group[0].question[0].group[1].linkId, "1.1.1.2")
        self.assertEqual(inst.group.group[0].question[0].linkId, "1.1.1")
        self.assertEqual(inst.group.group[0].question[0].type, "choice")
        # Section 1.2: histopathology with abdominal sub-group
        self.assertEqual(inst.group.group[1].concept[0].code, "HISTOPATHOLOGY")
        self.assertEqual(inst.group.group[1].concept[0].system, "http://example.org/system/code/sections")
        self.assertEqual(inst.group.group[1].group[0].concept[0].code, "ABDOMINAL")
        self.assertEqual(inst.group.group[1].group[0].concept[0].system, "http://example.org/system/code/sections")
        self.assertEqual(inst.group.group[1].group[0].linkId, "1.2.1")
        self.assertEqual(inst.group.group[1].group[0].question[0].concept[0].code, "STADPT")
        self.assertEqual(inst.group.group[1].group[0].question[0].concept[0].display, "pT category")
        self.assertEqual(inst.group.group[1].group[0].question[0].concept[0].system, "http://example.org/system/code/questions")
        self.assertEqual(inst.group.group[1].group[0].question[0].linkId, "1.2.1.2")
        self.assertEqual(inst.group.group[1].linkId, "1.2")
        # Resource-level attributes
        self.assertEqual(inst.group.linkId, "1")
        self.assertTrue(inst.group.required)
        self.assertEqual(inst.group.title, "Cancer Quality Forum Questionnaire 2012")
        self.assertEqual(inst.id, "3141")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.subjectType[0], "Patient")
        self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | 800ba8483277d72abe447c43a419acf2 | 64.972222 | 158 | 0.678456 | 3.215253 | false | false | false | false |
all-of-us/raw-data-repository | tests/service_tests/test_data_dictionary_updater.py | 1 | 19671 | from datetime import datetime
from rdr_service.services.data_dictionary_updater import DataDictionaryUpdater, changelog_tab_id, dictionary_tab_id,\
internal_tables_tab_id, hpo_key_tab_id, questionnaire_key_tab_id, site_key_tab_id
from tests.service_tests.test_google_sheets_client import GoogleSheetsTestBase
class DataDictionaryUpdaterTest(GoogleSheetsTestBase):
    """
    Tests for DataDictionaryUpdater: verifies the generated data-dictionary spreadsheet content
    (dictionary, internal-tables, HPO/site/questionnaire key tabs, and changelog) against the
    RDR schema and database fixtures, using the mocked Google Sheets client from the base class.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # These tests build fixture rows, so a real test database session is required.
        self.uses_database = True
    def setUp(self, **kwargs) -> None:
        super(DataDictionaryUpdaterTest, self).setUp(**kwargs)
        # Arbitrary version string; tests check that it is written to new/changed rows.
        self.mock_rdr_version = '1.97.1'
        self.updater = DataDictionaryUpdater('', self.mock_rdr_version, self.session)
    @classmethod
    def _default_tab_names(cls):
        """Tabs the mocked spreadsheet should contain by default (overrides base class hook)."""
        return [
            changelog_tab_id, dictionary_tab_id, internal_tables_tab_id,
            hpo_key_tab_id, questionnaire_key_tab_id, site_key_tab_id
        ]
    def _get_tab_rows(self, tab_id):
        """Return the uploaded row values for the tab whose range reference starts with tab_id, or None."""
        for tab_data in self._get_uploaded_sheet_data():
            if tab_data['range'][1:].startswith(tab_id):  # slicing to get rid of the quot used in the reference
                return tab_data['values']
        return None
    def assert_has_row(self, table_name, column_name, tab_values, expected_data_type=None, expected_description=None,
                       expected_unique_value_count=None, expected_unique_values_list=None,
                       expected_value_meaning_map=None, expected_is_primary_key=None,
                       expected_is_foreign_key=None, expected_foreign_key_target_table_fields=None,
                       expected_foreign_key_target_columns=None, expected_deprecated_note=None,
                       expected_rdr_version=None):
        """
        Fail unless tab_values contains a data-dictionary row for table_name.column_name.
        Each expected_* keyword that is provided is also compared against that row's cell value
        (boolean flags are compared against the sheet's 'Yes'/'No' strings).
        """
        # Find a row with the given table and column names
        row_found = False
        for row_values in tab_values:
            # Keys are listed in the spreadsheet's column order; zipping the dict with the row
            # values maps each cell onto its column name (missing trailing cells stay None).
            row_values_dict = {
                'row_table_name': None, 'row_column_name': None, 'table_column_concat': None, 'data_type': None,
                'description': None, 'unique_value_count': None, 'unique_value_list': None, 'value_meaning_map': None,
                'values_key': None, 'is_primary_key': None, 'is_foreign_key': None,
                'foreign_key_target_table_fields': None, 'foreign_key_target_columns': None, 'deprecated_note': None,
                'rdr_version': None
            }
            row_values_dict.update(zip(row_values_dict, row_values))
            if row_values_dict['row_table_name'] == table_name and row_values_dict['row_column_name'] == column_name:
                row_found = True
                # Compare the values displayed for the column in the data-dictionary values with the expected values
                if expected_data_type:
                    self.assertEqual(expected_data_type, row_values_dict['data_type'])
                if expected_description:
                    self.assertEqual(expected_description, row_values_dict['description'])
                if expected_unique_value_count:
                    self.assertEqual(expected_unique_value_count, row_values_dict['unique_value_count'])
                if expected_unique_values_list:
                    self.assertEqual(expected_unique_values_list, row_values_dict['unique_value_list'])
                if expected_value_meaning_map:
                    self.assertEqual(expected_value_meaning_map, row_values_dict['value_meaning_map'])
                if expected_is_primary_key is not None:
                    self.assertEqual('Yes' if expected_is_primary_key else 'No', row_values_dict['is_primary_key'])
                if expected_is_foreign_key is not None:
                    self.assertEqual('Yes' if expected_is_foreign_key else 'No', row_values_dict['is_foreign_key'])
                if expected_foreign_key_target_table_fields:
                    self.assertEqual(
                        expected_foreign_key_target_table_fields,
                        row_values_dict['foreign_key_target_table_fields']
                    )
                if expected_foreign_key_target_columns:
                    self.assertEqual(expected_foreign_key_target_columns, row_values_dict['foreign_key_target_columns'])
                if expected_deprecated_note:
                    self.assertEqual(expected_deprecated_note, row_values_dict['deprecated_note'])
                if expected_rdr_version:
                    self.assertEqual(expected_rdr_version, row_values_dict['rdr_version'])
        if not row_found:
            self.fail(f'{table_name}.{column_name} not found in results')
    def test_updating_data_dictionary_tab(self):
        """Check schema-derived column rows and the 'Last Updated' stamp on the dictionary tab."""
        self.updater.run_update()
        dictionary_tab_rows = self._get_tab_rows(dictionary_tab_id)
        # Check for some generic columns and definitions
        self.assert_has_row(
            'participant_summary', 'deceased_status', dictionary_tab_rows,
            expected_data_type='SMALLINT(6)',
            expected_description="Indicates whether the participant has a PENDING or APPROVED deceased reports.\n\n"
                                 "Will be UNSET for participants that have no deceased reports or only DENIED reports."
        )
        self.assert_has_row(
            'biobank_stored_sample', 'disposed', dictionary_tab_rows,
            expected_data_type='DATETIME',
            expected_description="The datetime at which the sample was disposed of"
        )
        # Check that the update date gets written
        timestamp_cell_value = dictionary_tab_rows[1][0]
        today = datetime.today()
        self.assertEqual(f'Last Updated: {today.month}/{today.day}/{today.year}', timestamp_cell_value)
    def test_show_unique_values(self):
        """Check the unique-values list/count and value-meaning map columns on the dictionary tab."""
        # Create some data for checking the dictionary values list
        self.data_generator.create_database_participant(participantOrigin='test')
        self.updater.run_update()
        dictionary_tab_rows = self._get_tab_rows(dictionary_tab_id)
        # Check that enumerations show the value meanings and unique value count
        # This check assumes that Organization's isObsolete property is based on an Enum,
        # and that the test data only has Organizations that don't have isObsolete set
        self.assert_has_row(
            'organization', 'is_obsolete', dictionary_tab_rows,
            expected_unique_values_list='NULL',
            expected_unique_value_count='1',
            expected_value_meaning_map="ACTIVE = 0, OBSOLETE = 1"
        )
        # Check that a column will show unique values when it is explicitly set to
        # This check assumes that Participant's participantOrigin is set to show unique values
        self.assert_has_row(
            'participant', 'participant_origin', dictionary_tab_rows,
            expected_unique_values_list='test',
            expected_unique_value_count='1',
            expected_value_meaning_map=''
        )
    def test_primary_and_foreign_key_columns(self):
        """Check the primary/foreign key indicator and foreign key target columns."""
        self.updater.run_update()
        dictionary_tab_rows = self._get_tab_rows(dictionary_tab_id)
        # Check the primary key column indicator
        self.assert_has_row('participant', 'participant_id', dictionary_tab_rows, expected_is_primary_key=True)
        # Check the foreign key column indicator
        self.assert_has_row('participant', 'site_id', dictionary_tab_rows, expected_is_foreign_key=True)
        # Check the foreign key column target fields
        self.assert_has_row(
            'participant', 'site_id', dictionary_tab_rows,
            expected_foreign_key_target_table_fields='site.site_id',
            expected_foreign_key_target_columns='site_id'
        )
    def test_internal_tab_values(self):
        """Check that internal-only tables land on the internal tab with an update stamp."""
        self.updater.run_update()
        internal_tab_rows = self._get_tab_rows(internal_tables_tab_id)
        # Check that ORM mapped tables can appear in the internal tab when marked as internal
        self.assert_has_row('bigquery_sync', 'id', internal_tab_rows)
        # Check that the update date gets written
        timestamp_cell_value = internal_tab_rows[1][1]
        today = datetime.today()
        self.assertEqual(f'Last Updated: {today.month}/{today.day}/{today.year}', timestamp_cell_value)
    def test_hpo_and_site_key_tabs(self):
        """Check that database HPO and site fixtures are written to the key tabs."""
        # Create hpo and site for test
        self.data_generator.create_database_hpo(hpoId=1000, name='DictionaryTest', displayName='Dictionary Test')
        test_org = self.data_generator.create_database_organization(displayName='Test Org', externalId='test_org')
        self.data_generator.create_database_site(
            siteId=4000, siteName='Test', googleGroup='test_site_group', organizationId=test_org.organizationId
        )
        self.updater.run_update()
        # Check that the expected hpo row gets into the spreadsheet
        hpo_rows = self._get_tab_rows(hpo_key_tab_id)
        self.assertIn(['1000', 'DictionaryTest', 'Dictionary Test'], hpo_rows)
        # Check that the expected site row gets into the spreadsheet
        site_rows = self._get_tab_rows(site_key_tab_id)
        self.assertIn(['4000', 'Test', 'test_site_group', test_org.externalId, test_org.displayName], site_rows)
    def test_questionnaire_key_tab(self):
        """Check the questionnaire key tab rows, including the PPI-survey and has-responses flags."""
        # Create two questionnaires for the test, one without any responses and another that has one
        # Also make one a scheduling survey to check the PPI survey indicator
        code = self.data_generator.create_database_code(display='Test Questionnaire', value='test_questionnaire')
        scheduling_code = self.data_generator.create_database_code(
            display='Scheduling Survey',
            value='Scheduling'
        )
        no_response_questionnaire = self.data_generator.create_database_questionnaire_history()
        self.data_generator.create_database_questionnaire_concept(
            questionnaireId=no_response_questionnaire.questionnaireId,
            questionnaireVersion=no_response_questionnaire.version,
            codeId=code.codeId
        )
        response_questionnaire = self.data_generator.create_database_questionnaire_history()
        self.data_generator.create_database_questionnaire_concept(
            questionnaireId=response_questionnaire.questionnaireId,
            questionnaireVersion=response_questionnaire.version,
            codeId=scheduling_code.codeId
        )
        participant = self.data_generator.create_database_participant()
        self.data_generator.create_database_questionnaire_response(
            questionnaireId=response_questionnaire.questionnaireId,
            questionnaireVersion=response_questionnaire.version,
            participantId=participant.participantId
        )
        # Check that the questionnaire values output as expected
        self.updater.run_update()
        questionnaire_values = self._get_tab_rows(questionnaire_key_tab_id)
        self.assertIn(
            [str(no_response_questionnaire.questionnaireId), code.display, code.value, 'N', 'Y'],
            questionnaire_values
        )
        self.assertIn(
            [str(response_questionnaire.questionnaireId),
             scheduling_code.display, scheduling_code.value, 'Y', 'N'],
            questionnaire_values
        )
    def _mock_tab_data(self, tab_id, *rows):
        """Mock the sheets API download so every default tab is empty except tab_id, which holds *rows."""
        # NOTE: the comprehension variable shadows the tab_id parameter only inside the
        # comprehension's own scope; the parameter is used on the following line.
        default_tab_values = {tab_id: [self._empty_cell] for tab_id in self.default_tab_names}
        default_tab_values[tab_id] = [
            *rows
        ]
        self.mock_spreadsheets_return.get.return_value.execute.return_value = {
            'sheets': [{
                'properties': {'title': tab_name},
                'data': [{'rowData': [{'values': row_values} for row_values in tab_rows]}]
            } for tab_name, tab_rows in default_tab_values.items()]
        }
    def _mock_data_dictionary_rows(self, *rows):
        """Mock the dictionary tab: four header rows, then *rows each offset by one leading empty cell."""
        self._mock_tab_data(
            dictionary_tab_id,
            [self._empty_cell],
            [self._empty_cell],
            [self._empty_cell],
            [self._empty_cell],
            *[[self._empty_cell, *row] for row in rows]
        )
    def test_version_added_display(self):
        """Verify that rows for the data-dictionary show what RDR version they were added in"""
        # Set the spreadsheet up to have a previously existing record that shouldn't have the version number changed
        self._mock_data_dictionary_rows(
            # Add a row for participant id that gives RDR version 1.1 (using expansion to fill in the middle cells)
            [self._mock_cell('participant'), self._mock_cell('participant_id'),
             *([self._empty_cell] * 12), self._mock_cell('1.2.1')]
        )
        self.updater.run_update()
        dictionary_tab_rows = self._get_tab_rows(dictionary_tab_id)
        # Check that a column that wasn't already on the spreadsheet shows the new version number
        self.assert_has_row('participant', 'biobank_id', dictionary_tab_rows,
                            expected_rdr_version=self.mock_rdr_version)
        # Check that previous RDR version values are maintained
        self.assert_has_row('participant', 'participant_id', dictionary_tab_rows, expected_rdr_version='1.2.1')
    def test_version_in_deprecation_note(self):
        """Verify that rows for the data-dictionary show what RDR version they were deprecated in"""
        # Set a previously existing record that doesn't have a deprecation note, but will get one in an update
        # This test assumes there is something in a current model that is marked as deprecated.
        self._mock_data_dictionary_rows(
            # Add a row for participant_summary's ehr_status column (using expansion to fill in the middle cells)
            [self._mock_cell('participant_summary'), self._mock_cell('ehr_status')]
        )
        self.updater.run_update()
        dictionary_tab_rows = self._get_tab_rows(dictionary_tab_id)
        # Check that previous RDR version values are maintained
        self.assert_has_row('participant_summary', 'ehr_status', dictionary_tab_rows,
                            expected_deprecated_note=f'Deprecated in {self.mock_rdr_version}: '
                                                     'Use wasEhrDataAvailable (was_ehr_data_available) instead')
    def test_existing_deprecation_note_left_alone(self):
        """
        Verify that rows that already have deprecation notes don't update again (so that the version stays the same)
        """
        # Set a previously existing record that doesn't have a deprecation note, but will get one in an update
        # This test assumes there is something in a current model that is marked as deprecated.
        deprecation_note_with_version = 'Deprecated in 1.1.1: use something else'
        self._mock_data_dictionary_rows(
            # Add note for participant_summary's ehr_status column (using expansion to fill in the middle cells)
            [self._mock_cell('participant_summary'), self._mock_cell('ehr_status'),
             *([self._empty_cell] * 11), self._mock_cell(deprecation_note_with_version)]
        )
        self.updater.run_update()
        dictionary_tab_rows = self._get_tab_rows(dictionary_tab_id)
        # Check that previous RDR version values are maintained
        self.assert_has_row('participant_summary', 'ehr_status', dictionary_tab_rows,
                            expected_deprecated_note=deprecation_note_with_version)
    def test_changelog_adding_and_removing_rows(self):
        """Check that adding and removing columns from the data-dictionary gets recorded in the changelog"""
        # Create something in the data-dictionary that will be removed because it isn't in the current schema
        self._mock_data_dictionary_rows(
            [self._mock_cell('table_that_never_existed'), self._mock_cell('id')]
        )
        self.updater.run_update()
        # Check that the changelog shows that we're adding something that is in our current schema, and
        # removing the dictionary record that isn't
        data_dictionary_change_log = self.updater.changelog[dictionary_tab_id]
        self.assertEqual('adding', data_dictionary_change_log.get(('participant', 'participant_id')))
        self.assertEqual('removing', data_dictionary_change_log.get(('table_that_never_existed', 'id')))
    def test_change_log_when_updating_schema_row(self):
        """Show that the changelog displays what changed when updating a row in the data-dictionary"""
        # Mock that participant table's participant_id is a VARCHAR described as a "Random string"
        # that is not the primary key but is indicated as a foreign key to another table
        self._mock_data_dictionary_rows(
            [self._mock_cell('participant'), self._mock_cell('participant_id'), self._empty_cell,
             self._mock_cell('VARCHAR'), self._mock_cell('Random string'), *([self._empty_cell] * 5),
             self._mock_cell('Yes')]
        )
        self.updater.run_update()
        # Check the change log and verify the changes shown for the participant_id column
        data_dictionary_change_log = self.updater.changelog[dictionary_tab_id]
        list_of_participant_id_changes = data_dictionary_change_log.get(('participant', 'participant_id'))
        self.assertIn('DATA_TYPE: changing from: "VARCHAR" to "INTEGER(11)"', list_of_participant_id_changes)
        self.assertIn('DESCRIPTION: changing from: "Random string" to '
                      '"PMI-specific ID generated by the RDR and used for tracking/linking participant data.\n'
                      '10-character string beginning with P."', list_of_participant_id_changes)
        self.assertIn('PRIMARY_KEY_INDICATOR: changing from: "" to "Yes"', list_of_participant_id_changes)
        self.assertIn('FOREIGN_KEY_INDICATOR: changing from: "Yes" to "No"', list_of_participant_id_changes)
    def test_key_tab_change_indicator(self):
        """Check the boolean changed/unchanged indicators the changelog records for key tabs."""
        self.updater.run_update()
        # For now the changelog just says whether something was changed on the key tabs.
        # Check to make sure it's set appropriately.
        # This test assumes there are no Questionnaires in the database (that way the questionnair key tab stays
        # unchanged/empty and there wouldn't be any changes for it.
        self.assertTrue(self.updater.changelog[hpo_key_tab_id])
        self.assertFalse(self.updater.changelog[questionnaire_key_tab_id])
    def test_change_log_message(self):
        """Check that a new changelog entry is appended after existing entries with the next sequence number."""
        # Mock some messages on the change log tab
        self._mock_tab_data(
            changelog_tab_id,
            [self._empty_cell],
            [self._mock_cell('4'), self._mock_cell('adding all fields'), self._mock_cell('1/3/20'),
             self._mock_cell('1.1.3'), self._mock_cell('test@one.com')],
            [self._mock_cell('6'), self._mock_cell('removing all fields'), self._mock_cell('10/31/20'),
             self._mock_cell('1.70.1'), self._mock_cell('test@two.com')]
        )
        self.updater.download_dictionary_values()
        self.updater.find_data_dictionary_diff()
        self.updater.upload_changes('adding them back again', 'test@three.com')
        # Check the change log values and make sure the new message was uploaded correctly
        change_log_data_uploaded = self._get_tab_rows(changelog_tab_id)
        today = datetime.today()
        self.assertEqual([
            ['4', 'adding all fields', '1/3/20', '1.1.3', 'test@one.com'],
            ['6', 'removing all fields', '10/31/20', '1.70.1', 'test@two.com'],
            ['7', 'adding them back again', f'{today.month}/{today.day}/{today.year}',
             self.mock_rdr_version, 'test@three.com']
        ], change_log_data_uploaded)
| bsd-3-clause | a1c13502ae3ecaa0980fa19d5b55caf1 | 52.021563 | 120 | 0.639927 | 3.98279 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/tools/tool_libs/resource_tool.py | 1 | 68245 | #! /bin/env python
#
# PDR data tools.
#
import argparse
# pylint: disable=superfluous-parens
# pylint: disable=broad-except
import datetime
import logging
import math
import os
import sys
from time import sleep
from werkzeug.exceptions import NotFound
from rdr_service.model.genomics import UserEventMetrics
from rdr_service.cloud_utils.gcp_cloud_tasks import GCPCloudTask
from rdr_service.dao.bigquery_sync_dao import BigQuerySyncDao
from rdr_service.dao.bq_participant_summary_dao import rebuild_bq_participant
from rdr_service.dao.bq_code_dao import BQCodeGenerator, BQCode
from rdr_service.dao.code_dao import Code
from rdr_service.dao.bq_questionnaire_dao import BQPDRQuestionnaireResponseGenerator
from rdr_service.dao.bq_genomics_dao import bq_genomic_set_update, bq_genomic_set_member_update, \
bq_genomic_job_run_update, bq_genomic_gc_validation_metrics_update, bq_genomic_file_processed_update, \
bq_genomic_manifest_file_update, bq_genomic_manifest_feedback_update
from rdr_service.dao.bq_workbench_dao import bq_workspace_update, bq_workspace_user_update, \
bq_institutional_affiliations_update, bq_researcher_update, bq_audit_update
from rdr_service.dao.bq_hpo_dao import bq_hpo_update, bq_hpo_update_by_id
from rdr_service.dao.bq_organization_dao import bq_organization_update, bq_organization_update_by_id
from rdr_service.dao.bq_site_dao import bq_site_update, bq_site_update_by_id
from rdr_service.dao.resource_dao import ResourceDataDao
from rdr_service.model.bigquery_sync import BigQuerySync
from rdr_service.model.bq_questionnaires import PDR_MODULE_LIST
from rdr_service.model.consent_file import ConsentFile
from rdr_service.model.participant import Participant
from rdr_service.model.resource_data import ResourceData
from rdr_service.model.retention_eligible_metrics import RetentionEligibleMetrics
from rdr_service.offline.bigquery_sync import batch_rebuild_participants_task
from rdr_service.resource import generators
from rdr_service.resource.generators.genomics import genomic_set_update, genomic_set_member_update, \
genomic_job_run_update, genomic_gc_validation_metrics_update, genomic_file_processed_update, \
genomic_manifest_file_update, genomic_manifest_feedback_update, genomic_informing_loop_update, \
genomic_cvl_result_past_due_update, genomic_member_report_state_update, genomic_result_viewed_update, \
genomic_appointment_event_update
from rdr_service.resource.constants import SKIP_TEST_PIDS_FOR_PDR
from rdr_service.resource.tasks import batch_rebuild_consent_metrics_task
from rdr_service.services.response_duplication_detector import ResponseDuplicationDetector
from rdr_service.services.system_utils import setup_logging, setup_i18n, print_progress_bar
from rdr_service.tools.tool_libs import GCPProcessContext, GCPEnvConfigObject
# Shared application logger used by all tool classes in this module.
_logger = logging.getLogger("rdr_logger")
# Tool_cmd and tool_desc name are required.
# Remember to add/update bash completion in 'tool_lib/tools.bash'
tool_cmd = "resource"
tool_desc = "Tools for updating and cleaning PDR resource records in RDR"
# Maps an RDR GCP project name to the BigQuery project that receives its PDR pipeline data.
# TODO: May want to use the BQTable __project_map__ instead? But that may fail if using localhost for dev testing
PDR_PROJECT_ID_MAP = {
    'all-of-us-rdr-sandbox': 'all-of-us-rdr-sandbox',
    'all-of-us-rdr-stable': 'aou-pdr-data-stable',
    'all-of-us-rdr-prod': 'aou-pdr-data-prod',
    'localhost': 'localhost'
}
# Database table names handled by the genomic rebuild helpers in this tool.
GENOMIC_DB_TABLES = ('genomic_set', 'genomic_set_member', 'genomic_job_run', 'genomic_gc_validation_metrics',
                     'genomic_file_processed', 'genomic_manifest_file', 'genomic_manifest_feedback',
                     'genomic_informing_loop', 'genomic_cvl_result_past_due', 'genomic_member_report_state',
                     'genomic_result_viewed', 'genomic_appointment_event')
# Research Workbench table names handled by the workbench rebuild helpers.
RESEARCH_WORKBENCH_TABLES = ('workspace', 'workspace_user', 'researcher', 'institutional_affiliations', 'audit')
# Awardee/site hierarchy tables handled by the site rebuild helpers.
SITE_TABLES = ('hpo', 'site', 'organization')
# Convenience function used by multiple resource tool classes
def chunks(lst, n):
    """Yield successive n-sized chunks from lst (the final chunk may be shorter)."""
    slices = (lst[offset:offset + n] for offset in range(0, len(lst), n))
    yield from slices
class CleanPDRDataClass(object):
def __init__(self, args, gcp_env: GCPEnvConfigObject, pk_id_list: None):
self.args = args
self.gcp_env = gcp_env
if args.id:
self.pk_id_list = [args.id, ]
else:
self.pk_id_list = pk_id_list
def delete_pk_ids_from_bigquery_sync(self, table_id):
"""
Delete the requested records from the bigquery_sync table. Note, that to restore an unintentional delete,
a rebuild can be performed of the associated pdr_mod_* data, but (for example) you may first need to find the
participants who are associated with the deleted data and rebuild *all* of their module response data.
:param table_id: Filter value for the bigquery_sync.table_id column
"""
# Target dataset for all the BigQuery RDR-to-PDR pipeline data
dataset_id = 'rdr_ops_data_view'
project_id = PDR_PROJECT_ID_MAP.get(self.gcp_env.project, None)
if not project_id:
raise ValueError(f'Unable to map {self.gcp_env.project} to an active BigQuery project')
else:
dao = BigQuerySyncDao()
with dao.session() as session:
# Verify the table_id matches at least some existing records in the bigquery_sync table
query = session.query(BigQuerySync.id)\
.filter(BigQuerySync.projectId == project_id).filter(BigQuerySync.datasetId == dataset_id)\
.filter(BigQuerySync.tableId == table_id)
if query.first() is None:
raise ValueError(f'No records found for bigquery_sync.table_id = {table_id}')
batch_count = 500
batch_total = len(self.pk_id_list)
processed = 0
for pk_ids in chunks(self.pk_id_list, batch_count):
session.query(BigQuerySync
).filter(BigQuerySync.projectId == project_id,
BigQuerySync.datasetId == dataset_id,
BigQuerySync.tableId == table_id
).filter(BigQuerySync.pk_id.in_(pk_ids)
).delete(synchronize_session=False)
# Inject a short delay between chunk-sized delete operations to avoid blocking other table updates
session.commit()
sleep(0.5)
processed += len(pk_ids)
if not self.args.debug:
print_progress_bar(
processed, batch_total, prefix="{0}/{1}:".format(processed, batch_total),
suffix="complete"
)
def delete_resource_pk_ids_from_resource_data(self, resource_type_id):
""" Perform deletions from the resource_data table based on resource_pk_id field matches """
dao = ResourceDataDao()
with dao.session() as session:
batch_count = 500
batch_total = len(self.pk_id_list)
processed = 0
for pk_ids in chunks(self.pk_id_list, batch_count):
session.query(ResourceData
).filter(ResourceData.resourceTypeID == resource_type_id
).filter(ResourceData.resourcePKID.in_(pk_ids)
).delete(synchronize_session=False)
# Inject a short delay between chunk-sized delete operations to avoid blocking other table updates
session.commit()
sleep(0.5)
processed += len(pk_ids)
if not self.args.debug:
print_progress_bar(
processed, batch_total, prefix="{0}/{1}:".format(processed, batch_total),
suffix="complete"
)
    def run(self):
        """
        Main program process for the PDR data cleanup tool.

        Depending on command line args, either cleans orphaned PDR module response
        data, or deletes the loaded pk ids from the bigquery_sync and/or
        resource_data tables.
        :return: Exit code value
        """
        clr = self.gcp_env.terminal_colors
        # Nothing was loaded into the pk id list; bail out early.
        if not self.pk_id_list:
            _logger.error('Nothing to do')
            return 1
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nClean PDR Data pipeline records:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        if self.args.from_file:
            _logger.info(' PK_IDs File : {0}'.format(clr.fmt(self.args.from_file)))
            _logger.info(' Total PK_IDs : {0}'.format(clr.fmt(len(self.pk_id_list))))
        _logger.info('=' * 90)
        _logger.info('')
        if self.args.pdr_mod_responses:
            # This option/use case is primarily for orphaned responses due to retroactively flagged duplicate
            # questionnaire responses. Use the cleanup code already in the ResponseDuplicationDetector class
            dao = BigQuerySyncDao()
            with dao.session() as session:
                ResponseDuplicationDetector.clean_pdr_module_data(self.pk_id_list, session, self.gcp_env.project)
        else:
            if self.args.bq_table_id:
                self.delete_pk_ids_from_bigquery_sync(self.args.bq_table_id)
            # Can delete from both bigquery_sync and resource data tables on the same run as long as the
            # bigquery_sync.pk_id matches the resource_data.resource_pk_id for the resource_type_id specified.
            if self.args.resource_type_id:
                self.delete_resource_pk_ids_from_resource_data(self.args.resource_type_id)
        # NOTE(review): falls through returning None on success — confirm callers treat None as exit code 0.
class ParticipantResourceClass(object):
    """ Rebuild PDR participant summary and questionnaire module response resource records. """
    def __init__(self, args, gcp_env: GCPEnvConfigObject, pid_list: None):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        :param pid_list: list of integer participant ids, if --from-file was specified
        """
        self.args = args
        self.gcp_env = gcp_env
        self.pid_list = pid_list
        # Collected RDR/PDR enrollment status mismatch messages when running in --qc mode.
        self.qc_error_list = []
    def update_single_pid(self, pid):
        """
        Update a single pid
        :param pid: participant id
        :return: 0 if successful otherwise 1
        """
        try:
            if not self.args.modules_only:
                if not self.args.qc:
                    # Skip the BQ build in QC mode; will just test the resource data return
                    rebuild_bq_participant(pid, project_id=self.gcp_env.project)
                res = generators.participant.rebuild_participant_summary_resource(pid, qc_mode=self.args.qc)
                if self.args.qc:
                    # Compare the legacy RDR enrollment status against the PDR value and record any diffs.
                    pid_dict = res.get_data()
                    rdr_status = pid_dict.get('enrollment_status_legacy_v2', None)
                    pdr_status = pid_dict.get('enrollment_status', None)
                    # A missing legacy status paired with a PDR REGISTERED status is an expected combination.
                    if not rdr_status and pdr_status == 'REGISTERED':
                        pass
                    elif rdr_status != pdr_status:
                        self.qc_error_list.append(f'P{pid} RDR {rdr_status} / PDR {pdr_status}')
            if not self.args.no_modules and not self.args.qc:
                mod_bqgen = BQPDRQuestionnaireResponseGenerator()
                # Generate participant questionnaire module response data
                for module in PDR_MODULE_LIST:
                    mod = module()
                    table, mod_bqrs = mod_bqgen.make_bqrecord(pid, mod.get_schema().get_module_name())
                    if not table:
                        continue
                    w_dao = BigQuerySyncDao()
                    with w_dao.session() as w_session:
                        for mod_bqr in mod_bqrs:
                            mod_bqgen.save_bqrecord(mod_bqr.questionnaire_response_id, mod_bqr, bqtable=table,
                                                    w_dao=w_dao, w_session=w_session, project_id=self.gcp_env.project)
        except NotFound:
            return 1
        return 0
    def update_batch(self, pids):
        """
        Submit batches of pids to Cloud Tasks for rebuild.
        """
        import gc
        # Larger batches in prod; smaller elsewhere.
        if self.gcp_env.project == 'all-of-us-rdr-prod':
            batch_size = 100
        else:
            batch_size = 25
        total_rows = len(pids)
        batch_total = int(math.ceil(float(total_rows) / float(batch_size)))
        # NOTE(review): this recompute produces the same value as the line above — appears redundant.
        if self.args.batch:
            batch_total = math.ceil(total_rows / batch_size)
        _logger.info('Calculated {0} tasks from {1} pids with a batch size of {2}.'.
                     format(batch_total, total_rows, batch_size))
        count = 0
        batch_count = 0
        batch = list()
        task = None if self.gcp_env.project == 'localhost' else GCPCloudTask()
        # queue up a batch of participant ids and send them to be rebuilt.
        for pid in pids:
            if pid not in SKIP_TEST_PIDS_FOR_PDR:
                batch.append({'pid': pid})
                count += 1
                if count == batch_size:
                    payload = {'batch': batch,
                               'build_modules': not self.args.no_modules,
                               'build_participant_summary': not self.args.modules_only
                               }
                    if self.gcp_env.project == 'localhost':
                        batch_rebuild_participants_task(payload)
                    else:
                        task.execute('rebuild_participants_task', payload=payload, in_seconds=30,
                                     queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
                    batch_count += 1
                    # reset for next batch
                    batch = list()
                    count = 0
                    if not self.args.debug:
                        print_progress_bar(
                            batch_count, batch_total, prefix="{0}/{1}:".format(batch_count, batch_total),
                            suffix="complete"
                        )
                    # Collect the garbage after so long to prevent hitting open file limit.
                    if batch_count % 250 == 0:
                        gc.collect()
        # send last batch if needed.
        if count:
            payload = {'batch': batch,
                       'build_modules': not self.args.no_modules,
                       'build_participant_summary': not self.args.modules_only
                       }
            batch_count += 1
            if self.gcp_env.project == 'localhost':
                batch_rebuild_participants_task(payload)
            else:
                task.execute('rebuild_participants_task', payload=payload, in_seconds=30,
                             queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
            if not self.args.debug:
                print_progress_bar(
                    batch_count, batch_total, prefix="{0}/{1}:".format(batch_count, batch_total), suffix="complete"
                )
        logging.info(f'Submitted {batch_count} tasks.')
        return 0
    def update_many_pids(self, pids):
        """
        Update many pids from a file.
        :param pids: list of participant ids to rebuild.
        :return: 0 on success, 1 when pids is empty.
        """
        if not pids:
            return 1
        # Batch/Cloud-Task path; --all-pids also batches unless running QC checks.
        if self.args.batch or (self.args.all_pids and not self.args.qc):
            return self.update_batch(pids)
        total_pids = len(pids)
        count = 0
        errors = 0
        for pid in pids:
            count += 1
            if pid in SKIP_TEST_PIDS_FOR_PDR:
                _logger.info(f'Skipping PDR data build for test pid {pid}')
                continue
            if self.update_single_pid(pid) != 0:
                errors += 1
                if self.args.debug:
                    _logger.error(f'PID {pid} not found.')
            if not self.args.debug:
                print_progress_bar(
                    count, total_pids, prefix="{0}/{1}:".format(count, total_pids), suffix="complete"
                )
        if errors > 0:
            _logger.warning(f'\n\nThere were {errors} PIDs not found during processing.')
        return 0
    def run(self):
        """
        Main program process
        :return: Exit code value
        """
        clr = self.gcp_env.terminal_colors
        pids = self.pid_list
        if not pids and not self.args.pid and not self.args.all_pids:
            _logger.error('Nothing to do')
            return 1
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nRebuild Participant Summaries for PDR:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        if pids:
            _logger.info(' PIDs File : {0}'.format(clr.fmt(self.args.from_file)))
            _logger.info(' Total PIDs : {0}'.format(clr.fmt(len(pids))))
        elif self.args.all_pids:
            # Load every participant id from the database for a full rebuild.
            dao = ResourceDataDao()
            with dao.session() as session:
                results = session.query(Participant.participantId).all()
                pids = [p.participantId for p in results]
            _logger.info(' Rebuild All PIDs : {0}'.format(clr.fmt('Yes')))
            _logger.info(' Total PIDs : {0}'.format(clr.fmt(len(pids))))
        elif self.args.pid:
            _logger.info(' PID : {0}'.format(clr.fmt(self.args.pid)))
        _logger.info('=' * 90)
        _logger.info('')
        if pids and len(pids):
            return self.update_many_pids(pids)
        if self.args.pid:
            if self.update_single_pid(self.args.pid) == 0 and not self.args.qc:
                _logger.info(f'Participant {self.args.pid} updated.')
            elif not self.args.qc:
                _logger.error(f'Participant ID {self.args.pid} not found.')
            # In QC mode report any enrollment status mismatches collected during the build.
            if self.qc_error_list:
                print('\nDiffs between RDR enrollment_status[_legacy_v2] and PDR enrollment_status:\n')
                for err in self.qc_error_list:
                    print(err)
            elif self.args.qc:
                print('\nNo enrollment status discrepancies found')
        # NOTE(review): the single-pid path always returns 1 even on success — confirm intended exit code.
        return 1
class CodeResourceClass(object):
    """ Rebuild PDR code table records (BigQuery and resource data). """
    def __init__(self, args, gcp_env: GCPEnvConfigObject):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        """
        self.args = args
        self.gcp_env = gcp_env
    def update_code_table(self):
        """
        Rebuild the BigQuery and resource records for every code table row, or for a
        single row when --id was specified.
        :return: 0 when complete.
        """
        ro_dao = BigQuerySyncDao(backup=True)
        with ro_dao.session() as ro_session:
            if not self.args.id:
                results = ro_session.query(Code.codeId).all()
            else:
                # Force a list return type for the single-id lookup
                results = ro_session.query(Code.codeId).filter(Code.codeId == self.args.id).all()
            count = 0
            total_ids = len(results)
            w_dao = BigQuerySyncDao()
            _logger.info(' Code table: rebuilding {0} records...'.format(total_ids))
            with w_dao.session() as w_session:
                # Hoisted out of the loop: the generators take the code id per call, so there is
                # no need to re-construct them on every iteration (previously done per row).
                gen = BQCodeGenerator()
                rsc_gen = generators.code.CodeGenerator()
                for row in results:
                    bqr = gen.make_bqrecord(row.codeId)
                    gen.save_bqrecord(row.codeId, bqr, project_id=self.gcp_env.project,
                                      bqtable=BQCode, w_dao=w_dao, w_session=w_session)
                    rsc_rec = rsc_gen.make_resource(row.codeId)
                    rsc_rec.save()
                    count += 1
                    if not self.args.debug:
                        print_progress_bar(count, total_ids, prefix="{0}/{1}:".format(count, total_ids),
                                           suffix="complete")
        # Explicit success code; run() returns this value as the process exit code
        # (previously returned None implicitly).
        return 0
    def run(self):
        """ Main entry point: print the banner and rebuild the code table records. """
        clr = self.gcp_env.terminal_colors
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nUpdate Code table:',
                             clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        return self.update_code_table()
class GenomicResourceClass(object):
    """ Rebuild genomic table records for PDR. """
    def __init__(self, args, gcp_env: GCPEnvConfigObject, id_list: None):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        :param id_list: list of integer ids from a genomic table, if --genomic-table and --from-file were specified
        """
        self.args = args
        self.gcp_env = gcp_env
        self.id_list = id_list
    def update_single_id(self, table, _id):
        """
        Rebuild the PDR record(s) for a single genomic table row.

        Tables with a bq_* call rebuild the legacy BigQuery record in addition to
        the newer resource record; the rest only rebuild the resource record.
        :param table: genomic table name.
        :param _id: primary key id of the record to rebuild.
        :return: 0 if successful otherwise 1
        """
        try:
            if table == 'genomic_set':
                bq_genomic_set_update(_id, project_id=self.gcp_env.project)
                genomic_set_update(_id)
            elif table == 'genomic_set_member':
                bq_genomic_set_member_update(_id, project_id=self.gcp_env.project)
                genomic_set_member_update(_id)
            elif table == 'genomic_job_run':
                bq_genomic_job_run_update(_id, project_id=self.gcp_env.project)
                genomic_job_run_update(_id)
            elif table == 'genomic_file_processed':
                bq_genomic_file_processed_update(_id, project_id=self.gcp_env.project)
                genomic_file_processed_update(_id)
            elif table == 'genomic_manifest_file':
                bq_genomic_manifest_file_update(_id, project_id=self.gcp_env.project)
                genomic_manifest_file_update(_id)
            elif table == 'genomic_manifest_feedback':
                bq_genomic_manifest_feedback_update(_id, project_id=self.gcp_env.project)
                genomic_manifest_feedback_update(_id)
            elif table == 'genomic_gc_validation_metrics':
                bq_genomic_gc_validation_metrics_update(_id, project_id=self.gcp_env.project)
                genomic_gc_validation_metrics_update(_id)
            elif table == 'genomic_informing_loop':
                genomic_informing_loop_update(_id)
            elif table == 'genomic_cvl_result_past_due':
                genomic_cvl_result_past_due_update(_id)
            elif table == 'genomic_member_report_state':
                genomic_member_report_state_update(_id)
            elif table == 'genomic_result_viewed':
                genomic_result_viewed_update(_id)
            elif table == 'genomic_appointment_event':
                genomic_appointment_event_update(_id)
        except NotFound:
            return 1
        return 0
    def update_batch(self, table, _ids):
        """ Submit chunks of 250 ids as Cloud Tasks (or rebuild locally when project is localhost). """
        count = 0
        task = None if self.gcp_env.project == 'localhost' else GCPCloudTask()
        if not self.args.debug:
            print_progress_bar(
                count, len(_ids), prefix="{0}/{1}:".format(count, len(_ids)), suffix="complete"
            )
        for batch in chunks(_ids, 250):
            if self.gcp_env.project == 'localhost':
                for _id in batch:
                    self.update_single_id(table, _id)
            else:
                payload = {'table': table, 'ids': batch}
                task.execute('rebuild_genomic_table_records_task', payload=payload, in_seconds=30,
                             queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
            count += len(batch)
            if not self.args.debug:
                print_progress_bar(
                    count, len(_ids), prefix="{0}/{1}:".format(count, len(_ids)), suffix="complete"
                )
    def update_many_ids(self, table, _ids):
        """
        Rebuild many records for a table, batched (--batch) or one at a time.
        :return: 0 on success, 1 when _ids is empty.
        """
        if not _ids:
            return 1
        _logger.info(f'Processing batch for table {table}...')
        if self.args.batch:
            self.update_batch(table, _ids)
            _logger.info(f'Processing {table} batch complete.')
            return 0
        total_ids = len(_ids)
        count = 0
        errors = 0
        for _id in _ids:
            count += 1
            if self.update_single_id(table, _id) != 0:
                errors += 1
                if self.args.debug:
                    _logger.error(f'{table} ID {_id} not found.')
            if not self.args.debug:
                print_progress_bar(
                    count, total_ids, prefix="{0}/{1}:".format(count, total_ids), suffix="complete"
                )
        if errors > 0:
            _logger.warning(f'\n\nThere were {errors} IDs not found during processing.')
        return 0
    def run(self):
        """ Main entry point: rebuild one id, an id list, or all ids for one or all genomic tables. """
        clr = self.gcp_env.terminal_colors
        if not self.args.id and not self.args.all_ids and not self.args.all_tables and not self.id_list:
            _logger.error('Nothing to do')
            return 1
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nRebuild Genomic Records for PDR:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        _logger.info(' Genomic Table : {0}'.format(clr.fmt(self.args.table)))
        if self.args.all_ids or self.args.all_tables:
            dao = ResourceDataDao()
            _logger.info(' Rebuild All Records : {0}'.format(clr.fmt('Yes')))
            if self.args.all_tables:
                tables = [{'name': t, 'ids': list()} for t in GENOMIC_DB_TABLES]
            else:
                tables = [{'name': self.args.table, 'ids': list()}]
            _logger.info(' Rebuild Table(s) : {0}'.format(
                clr.fmt(', '.join([t['name'] for t in tables]))))
            # Gather all primary key ids for each table up front, then process table by table.
            for table in tables:
                with dao.session() as session:
                    results = session.execute(f'select id from {table["name"]}')
                    table['ids'] = [r.id for r in results]
                    _logger.info(' Total Records : {0} = {1}'.
                                 format(clr.fmt(table["name"]), clr.fmt(len(table['ids']))))
            for table in tables:
                self.update_many_ids(table['name'], table['ids'])
        elif self.args.id:
            _logger.info(' Record ID : {0}'.format(clr.fmt(self.args.id)))
            self.update_single_id(self.args.table, self.args.id)
        elif self.id_list:
            _logger.info(' Total Records : {0}'.format(clr.fmt(len(self.id_list))))
            if len(self.id_list):
                self.update_many_ids(self.args.table, self.id_list)
        # NOTE(review): returns 1 unconditionally, matching the sibling tool classes — confirm intended.
        return 1
class EHRReceiptClass(object):
    """ Patch PDR participant summary records with EHR receipt data from the RDR. """
    def __init__(self, args, gcp_env: GCPEnvConfigObject, pid_list: None):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        :param pid_list: list of integer participant ids — presumably from --from-file; TODO confirm.
        """
        self.args = args
        self.gcp_env = gcp_env
        self.pid_list = pid_list
    def update_batch(self, records):
        """
        Submit batches of pids to Cloud Tasks for rebuild.

        Each record is converted to a patch payload with the EHR status/receipt fields
        before being queued (or run locally when project is localhost).
        """
        import gc
        batch_size = 100
        total_rows = len(records)
        batch_total = int(math.ceil(float(total_rows) / float(batch_size)))
        _logger.info('Calculated {0} tasks from {1} ehr records with a batch size of {2}.'.
                     format(batch_total, total_rows, batch_size))
        count = 0
        batch_count = 0
        batch = list()
        task = None if self.gcp_env.project == 'localhost' else GCPCloudTask()
        from rdr_service.participant_enums import EhrStatus
        # queue up a batch of participant ids and send them to be rebuilt.
        for row in records:
            ehr_status = EhrStatus(row.ehr_status)
            batch.append({
                'pid': row.participant_id,
                'patch': {
                    'ehr_status': str(ehr_status),
                    'ehr_status_id': int(ehr_status),
                    'ehr_receipt': row.ehr_receipt_time.isoformat() if row.ehr_receipt_time else None,
                    'ehr_update': row.ehr_update_time.isoformat() if row.ehr_update_time else None
                }
            })
            count += 1
            if count == batch_size:
                payload = {'batch': batch, 'build_participant_summary': True, 'build_modules': False}
                if self.gcp_env.project == 'localhost':
                    batch_rebuild_participants_task(payload)
                else:
                    task.execute('rebuild_participants_task', payload=payload, in_seconds=30,
                                 queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
                batch_count += 1
                # reset for next batch
                batch = list()
                count = 0
                if not self.args.debug:
                    print_progress_bar(
                        batch_count, batch_total, prefix="{0}/{1}:".format(batch_count, batch_total),
                        suffix="complete"
                    )
                # Collect the garbage after so long to prevent hitting open file limit.
                if batch_count % 250 == 0:
                    gc.collect()
        # send last batch if needed.
        if count:
            payload = {'batch': batch, 'build_participant_summary': True, 'build_modules': False}
            batch_count += 1
            if self.gcp_env.project == 'localhost':
                batch_rebuild_participants_task(payload)
            else:
                task.execute('rebuild_participants_task', payload=payload, in_seconds=30,
                             queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
            if not self.args.debug:
                print_progress_bar(
                    batch_count, batch_total, prefix="{0}/{1}:".format(batch_count, batch_total), suffix="complete"
                )
        logging.info(f'Submitted {batch_count} tasks.')
        return 0
    def run(self):
        """
        Query the EHR receipt fields for all (or the selected) participants and
        submit patch batches for rebuild.
        :return: 0 when complete.
        """
        clr = self.gcp_env.terminal_colors
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nUpdate Participant Summary Records with RDR EHR receipt data:',
                             clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        pids = self.pid_list if self.pid_list else []
        dao = ResourceDataDao()
        with dao.session() as session:
            sql = 'select participant_id, ehr_status, ehr_receipt_time, ehr_update_time from participant_summary'
            cursor = session.execute(sql)
            # When a pid list was supplied, keep only matching rows; otherwise take everything.
            if len(pids):
                _logger.info(' PIDs File : {0}'.format(clr.fmt(self.args.from_file)))
                _logger.info(' Total PIDs : {0}'.format(clr.fmt(len(pids))))
                records = [row for row in cursor if row.participant_id in pids]
            else:
                records = [row for row in cursor]
            _logger.info(' Total Records : {0}'.format(clr.fmt(len(records))))
            _logger.info(' Batch Size : 100')
            if len(records):
                self.update_batch(records)
        return 0
class ResearchWorkbenchResourceClass(object):
    """ Rebuild Research Workbench table records for PDR. """
    def __init__(self, args, gcp_env: GCPEnvConfigObject, id_list: None):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        :param id_list: list of integer ids from a research workbench table, if --table and --from-file were specified.
        """
        self.args = args
        self.gcp_env = gcp_env
        self.id_list = id_list
    def update_single_id(self, table, _id):
        """
        Rebuild the BigQuery record for a single workbench table row.
        :param table: workbench table option name (see table_map in run()).
        :param _id: primary key id of the record to rebuild.
        :return: 0 if successful otherwise 1
        """
        try:
            if table == 'workspace':
                bq_workspace_update(_id, project_id=self.gcp_env.project)
            elif table == 'workspace_user':
                bq_workspace_user_update(_id, project_id=self.gcp_env.project)
            elif table == 'institutional_affiliations':
                bq_institutional_affiliations_update(_id, project_id=self.gcp_env.project)
            elif table == 'researcher':
                bq_researcher_update(_id, project_id=self.gcp_env.project)
            elif table == 'audit':
                bq_audit_update(_id, project_id=self.gcp_env.project)
        except NotFound:
            return 1
        return 0
    def update_batch(self, table, _ids):
        """ Submit chunks of 250 ids as Cloud Tasks (or rebuild locally when project is localhost). """
        count = 0
        task = None if self.gcp_env.project == 'localhost' else GCPCloudTask()
        if not self.args.debug:
            print_progress_bar(
                count, len(_ids), prefix="{0}/{1}:".format(count, len(_ids)), suffix="complete"
            )
        for batch in chunks(_ids, 250):
            if self.gcp_env.project == 'localhost':
                for _id in batch:
                    self.update_single_id(table, _id)
            else:
                payload = {'table': table, 'ids': batch}
                task.execute('rebuild_research_workbench_table_records_task', payload=payload, in_seconds=30,
                             queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
            count += len(batch)
            if not self.args.debug:
                print_progress_bar(
                    count, len(_ids), prefix="{0}/{1}:".format(count, len(_ids)), suffix="complete"
                )
    def update_many_ids(self, table, _ids):
        """
        Rebuild many records for a table, batched (--batch) or one at a time.
        :return: 0 on success, 1 when _ids is empty.
        """
        if not _ids:
            _logger.warning(f'No records found in table {table}, skipping.')
            return 1
        _logger.info(f'Processing batch for table {table}...')
        if self.args.batch:
            self.update_batch(table, _ids)
            _logger.info(f'Processing {table} batch complete.')
            return 0
        total_ids = len(_ids)
        count = 0
        errors = 0
        for _id in _ids:
            count += 1
            if self.update_single_id(table, _id) != 0:
                errors += 1
                if self.args.debug:
                    _logger.error(f'{table} ID {_id} not found.')
            if not self.args.debug:
                print_progress_bar(
                    count, total_ids, prefix="{0}/{1}:".format(count, total_ids), suffix="complete"
                )
        if errors > 0:
            _logger.warning(f'\n\nThere were {errors} IDs not found during processing.')
        return 0
    def run(self):
        """ Main entry point: rebuild one id, an id list, or all ids for one or all workbench tables. """
        clr = self.gcp_env.terminal_colors
        if not self.args.id and not self.args.all_ids and not self.args.all_tables and not self.id_list:
            _logger.error('Nothing to do')
            return 1
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nRebuild Research Workbench Records for PDR:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        _logger.info(' Database Table : {0}'.format(clr.fmt(self.args.table)))
        # Map command line table option values to the actual database (history/snapshot) table names.
        table_map = {
            'workspace': 'workbench_workspace_snapshot',
            'workspace_user': 'workbench_workspace_user_history',
            'researcher': 'workbench_researcher_history',
            'institutional_affiliations': 'workbench_institutional_affiliations_history',
            'audit': 'workbench_audit'
        }
        if self.args.all_ids or self.args.all_tables:
            dao = ResourceDataDao()
            _logger.info(' Rebuild All Records : {0}'.format(clr.fmt('Yes')))
            if self.args.all_tables:
                tables = [{'name': t, 'ids': list()} for t in RESEARCH_WORKBENCH_TABLES]
            else:
                tables = [{'name': self.args.table, 'ids': list()}]
            _logger.info(' Rebuild Table(s) : {0}'.format(
                clr.fmt(', '.join([t['name'] for t in tables]))))
            # Gather all primary key ids for each table up front, then process table by table.
            for table in tables:
                with dao.session() as session:
                    results = session.execute(f'select id from {table_map[table["name"]]}')
                    table['ids'] = [r.id for r in results]
                    _logger.info(' Total Records : {0} = {1}'.
                                 format(clr.fmt(table["name"]), clr.fmt(len(table['ids']))))
            for table in tables:
                self.update_many_ids(table['name'], table['ids'])
        elif self.args.id:
            _logger.info(' Record ID : {0}'.format(clr.fmt(self.args.id)))
            self.update_single_id(self.args.table, self.args.id)
        elif self.id_list:
            _logger.info(' Total Records : {0}'.format(clr.fmt(len(self.id_list))))
            if len(self.id_list):
                self.update_many_ids(self.args.table, self.id_list)
        # NOTE(review): returns 1 unconditionally, matching the sibling tool classes — confirm intended.
        return 1
class SiteResourceClass(object):
    """ Rebuild PDR records for the hpo, organization and site tables. """
    def __init__(self, args, gcp_env: GCPEnvConfigObject):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        """
        self.args = args
        self.gcp_env = gcp_env
    def run(self):
        """
        Rebuild one record (--id) or all records for the selected table(s).
        :return: exit code value (0 = success, 1 = nothing to do)
        """
        clr = self.gcp_env.terminal_colors
        if not self.args.table and not self.args.all_tables:
            _logger.error('Nothing to do')
            return 1
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nRebuild hpo/organization/site Records for PDR:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        _logger.info(' Database Table : {0}'.format(clr.fmt(self.args.table)))
        if self.args.all_tables:
            tables = [t for t in SITE_TABLES]
        else:
            tables = [self.args.table]
        if self.args.id:
            _logger.info(' Record ID : {0}'.format(clr.fmt(self.args.id)))
        else:
            _logger.info(' Rebuild All Records : {0}'.format(clr.fmt('Yes')))
        _logger.info(' Rebuild Table(s) : {0}'.format(clr.fmt(', '.join([t for t in tables]))))
        for table in tables:
            if table == 'hpo':
                if self.args.id:
                    bq_hpo_update_by_id(self.args.id, self.gcp_env.project)
                else:
                    bq_hpo_update(self.gcp_env.project)
            elif table == 'site':
                if self.args.id:
                    bq_site_update_by_id(self.args.id, self.gcp_env.project)
                else:
                    bq_site_update(self.gcp_env.project)
            elif table == 'organization':
                if self.args.id:
                    # Bug fix: the record id argument was previously passed as self.gcp_env.project
                    # (the project id), so --id rebuilds for organizations never targeted the record.
                    bq_organization_update_by_id(self.args.id, self.gcp_env.project)
                else:
                    bq_organization_update(self.gcp_env.project)
            else:
                _logger.warning(f'Unknown table {table}. Skipping rebuild for {table}')
        return 0
class RetentionEligibleMetricClass:
    """ Handle Retention Eligible Metric resource data """
    def __init__(self, args, gcp_env: GCPEnvConfigObject, id_list: None):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        :param id_list: list of integer ids from retention eligible metrics table,
                        if --table and --from-file were specified.
        """
        self.args = args
        self.gcp_env = gcp_env
        self.id_list = id_list
        # Generator used to build retention eligible metric resource records.
        self.res_gen = generators.RetentionEligibleMetricGenerator()
    def update_single_id(self, pid):
        """
        Rebuild the resource record for a single participant.
        :param pid: participant id.
        :return: 0 if successful otherwise 1
        """
        try:
            res = self.res_gen.make_resource(pid)
            res.save()
        except NotFound:
            _logger.error(f'Participant P{pid} not found in retention_eligible_metrics table.')
            return 1
        return 0
    def update_batch(self, pids):
        """ Submit chunks of 250 pids as Cloud Tasks (or rebuild locally when project is localhost). """
        count = 0
        task = None if self.gcp_env.project == 'localhost' else GCPCloudTask()
        if not self.args.debug:
            print_progress_bar(
                count, len(pids), prefix="{0}/{1}:".format(count, len(pids)), suffix="complete"
            )
        for batch in chunks(pids, 250):
            if self.gcp_env.project == 'localhost':
                for id_ in batch:
                    self.update_single_id(id_)
            else:
                # Query results may be Row tuples rather than plain ints; flatten for the task payload.
                if isinstance(batch[0], int):
                    payload = {'rebuild_all': False, 'batch': batch}
                else:
                    payload = {'rebuild_all': False, 'batch': [x[0] for x in batch]}
                task.execute('batch_rebuild_retention_eligible_task', payload=payload, in_seconds=30,
                             queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
            count += len(batch)
            if not self.args.debug:
                print_progress_bar(
                    count, len(pids), prefix="{0}/{1}:".format(count, len(pids)), suffix="complete"
                )
    def update_many_ids(self, pids):
        """
        Rebuild many records, batched (--batch) or one at a time.
        :return: 0 on success, 1 when pids is empty.
        """
        if not pids:
            _logger.warning(f'No records found in batch, skipping.')
            return 1
        _logger.info(f'Processing retention eligible metrics batch...')
        if self.args.batch:
            self.update_batch(pids)
            _logger.info(f'Processing retention eligible metrics batch complete.')
            return 0
        total_ids = len(pids)
        count = 0
        errors = 0
        # NOTE(review): when pids came from session.query(...).all() these are Row objects, not
        # plain ints — confirm make_resource() accepts them on this non-batch path.
        for pid in pids:
            count += 1
            if self.update_single_id(pid) != 0:
                errors += 1
                if self.args.debug:
                    _logger.error(f'ID {pid} not found.')
            if not self.args.debug:
                print_progress_bar(
                    count, total_ids, prefix="{0}/{1}:".format(count, total_ids), suffix="complete"
                )
        if errors > 0:
            _logger.warning(f'\n\nThere were {errors} IDs not found during processing.')
        return 0
    def run(self):
        """ Main entry point: rebuild one pid (--pid), all pids (--all-pids), or an id list. """
        clr = self.gcp_env.terminal_colors
        if not self.args.pid and not self.args.all_pids and not self.id_list:
            _logger.error('Nothing to do')
            return 1
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nRebuild Retention Eligible Records for PDR:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        if self.args.all_pids :
            dao = ResourceDataDao()
            _logger.info(' Rebuild All Records : {0}'.format(clr.fmt('Yes')))
            _logger.info('=' * 90)
            with dao.session() as session:
                pids = session.query(RetentionEligibleMetrics.participantId).all()
                self.update_many_ids(pids)
        elif self.args.pid:
            _logger.info(' Participant ID : {0}'.format(clr.fmt(f'P{self.args.pid}')))
            _logger.info('=' * 90)
            self.update_single_id(self.args.pid)
        elif self.id_list:
            _logger.info(' Total Records : {0}'.format(clr.fmt(len(self.id_list))))
            _logger.info('=' * 90)
            if len(self.id_list):
                self.update_many_ids(self.id_list)
        # NOTE(review): returns 1 unconditionally, matching the sibling tool classes — confirm intended.
        return 1
class ConsentMetricClass(object):
    """ Build consent validation metrics records for PDR extract """
    def __init__(self, args, gcp_env: GCPEnvConfigObject, id_list: None):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        :param id_list: list of integer ids from consent_file table, if from_file or modified_since were specified.
        """
        self.args = args
        self.gcp_env = gcp_env
        self.id_list = id_list
        self.res_gen = generators.ConsentMetricGenerator()
    def update_batch(self, _ids):
        """
        Batch update of ConsentMetric resource records via Cloud Tasks (or run
        locally when project is localhost).
        :param _ids: list of consent_file primary key ids to rebuild.
        """
        batch_size = 250
        count = 0
        task = None if self.gcp_env.project == 'localhost' else GCPCloudTask()
        # Round up so a partial final chunk still counts as a task.
        batches = len(_ids) // batch_size
        if len(_ids) % batch_size:
            batches += 1
        _logger.info(' Records : {0}'.format(self.gcp_env.terminal_colors.fmt(len(_ids))))
        _logger.info(' Batch size : {0}'.format(self.gcp_env.terminal_colors.fmt(batch_size)))
        _logger.info(' Tasks : {0}'.format(self.gcp_env.terminal_colors.fmt(batches)))
        for batch in chunks(_ids, batch_size):
            payload = {'batch': list(batch)}
            if self.gcp_env.project == 'localhost':
                batch_rebuild_consent_metrics_task(payload)
            else:
                task.execute('batch_rebuild_consent_metrics_task', payload=payload, in_seconds=30,
                             queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
            count += 1
            if not self.args.debug:
                # Bug fix: suffix was previously passed as an (ignored) keyword argument to
                # str.format() instead of to print_progress_bar().
                print_progress_bar(
                    count, batches, prefix="{0}/{1} tasks queued:".format(count, batches), suffix="complete")
    def run(self):
        """
        Rebuild consent validation metrics resource records, or export them to a
        tab-separated file when --to-file was specified.
        :return: exit code value (0 = success, 1 = nothing to do / aborted)
        """
        self.gcp_env.activate_sql_proxy()
        clr = self.gcp_env.terminal_colors
        _logger.info('')
        _logger.info(clr.fmt('\nRebuild Consent Validation Metrics Records for PDR:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        _logger.info(' Batch mode enabled : {0}'.format(clr.fmt('Yes' if self.args.batch else 'No')))
        dao = ResourceDataDao(backup=True)
        # Determine the id list: --all-ids, --from-file, --id, then --modified-since (in priority order).
        if self.args.all_ids:
            _logger.info(' Rebuild All Records : {0}'.format(clr.fmt('Yes')))
            with dao.session() as session:
                self.id_list = [r.id for r in session.query(ConsentFile.id).all()]
        elif hasattr(self.args, 'from_file') and self.args.from_file:
            self.id_list = get_id_list(self.args.from_file)
        elif hasattr(self.args, 'id') and self.args.id:
            # Use a one-element id list for rebuilding single ids
            self.id_list = [self.args.id, ]
        # The --modified-since option is applied last; will be ignored if another option provided the id list
        elif self.args.modified_since:
            with dao.session() as session:
                results = session.query(ConsentFile.id).filter(ConsentFile.modified >= self.args.modified_since).all()
                self.id_list = [r.id for r in results]
        if not (self.id_list and len(self.id_list)):
            _logger.error('Nothing to do')
            return 1
        if self.args.batch:
            self.update_batch(self.id_list)
            return 0
        elif len(self.id_list) > 2500:
            # Issue a warning if a local rebuild will exceed 2500 records; default to aborting
            response = input(f'\n{len(self.id_list)} records will be rebuilt. Continue without --batch mode? [y/N]')
            if not response or response.upper() == 'N':
                _logger.error('Aborted by user')
                return 1
        results = self.res_gen.get_consent_validation_records(dao=dao, id_list=self.id_list)
        csv_lines = []
        column_headers = []
        count = 0
        if results:
            # Bug fix: a blank line was previously appended as a "header" before the loop, while
            # column_headers was still empty; the real header row is emitted once, below, when the
            # first record determines the column names.
            for row in results:
                resource_data = self.res_gen.make_resource(row.id, consent_validation_rec=row)
                if self.args.to_file:
                    # First line should be the column headers
                    if not len(column_headers):
                        column_headers = sorted(resource_data.get_data().keys())
                        line = "\t".join(column_headers)
                        csv_lines.append(f'{line}\n')
                    csv_values = []
                    for key, value in sorted(resource_data.get_data().items()):
                        if key in ['created', 'modified']:
                            csv_values.append(value.strftime("%Y-%m-%d %H:%M:%S") if value else "")
                        elif key in ['consent_authored_date', 'resolved_date']:
                            csv_values.append(value.strftime("%Y-%m-%d") if value else "")
                        elif isinstance(value, bool):
                            csv_values.append("1" if value else "0")
                        elif key == 'participant_id':
                            # Strip the leading 'P' from the P-prefixed participant id string.
                            csv_values.append(value[1:])
                        else:
                            csv_values.append(str(value) if value else "")
                    line = "\t".join(csv_values)
                    csv_lines.append(f'{line}\n')
                else:
                    resource_data.save(w_dao=ResourceDataDao(backup=False))
                count += 1
                if not self.args.debug:
                    print_progress_bar(
                        count, len(results), prefix="{0}/{1}:".format(count, len(results)), suffix="complete"
                    )
        if self.args.to_file:
            with open(self.args.to_file, "w") as f:
                f.writelines(csv_lines)
        return 0
class UserEventMetricsClass(object):
    """ Build Color user event metrics records for PDR extract """
    def __init__(self, args, gcp_env: GCPEnvConfigObject, id_list: None):
        """
        :param args: command line arguments.
        :param gcp_env: gcp environment information, see: gcp_initialize().
        :param id_list: list of integer ids from the user_event_metrics table, if --from-file was specified.
        """
        self.args = args
        self.gcp_env = gcp_env
        self.id_list = id_list
        self.res_gen = generators.GenomicUserEventMetricsSchemaGenerator()
    def update_single_id(self, id_):
        """
        Rebuild the resource record for a single user_event_metrics row.
        :param id_: primary key id.
        :return: 0 if successful otherwise 1
        """
        try:
            res = self.res_gen.make_resource(id_)
            res.save()
        except NotFound:
            _logger.error(f'Primary key id {id_} not found in user_event_metrics table.')
            return 1
        return 0
    def update_batch(self, ids):
        """ Submit chunks of 250 ids as Cloud Tasks (or rebuild locally when project is localhost). """
        count = 0
        task = None if self.gcp_env.project == 'localhost' else GCPCloudTask()
        if not self.args.debug:
            print_progress_bar(
                count, len(ids), prefix="{0}/{1}:".format(count, len(ids)), suffix="complete"
            )
        for batch in chunks(ids, 250):
            if self.gcp_env.project == 'localhost':
                for id_ in batch:
                    self.update_single_id(id_)
            else:
                # Query results may be Row tuples rather than plain ints; flatten for the task payload.
                if isinstance(batch[0], int):
                    payload = {'rebuild_all': False, 'batch': batch}
                else:
                    payload = {'rebuild_all': False, 'batch': [x[0] for x in batch]}
                task.execute('batch_rebuild_user_event_metrics_task', payload=payload, in_seconds=30,
                             queue='resource-rebuild', project_id=self.gcp_env.project, quiet=True)
            count += len(batch)
            if not self.args.debug:
                print_progress_bar(
                    count, len(ids), prefix="{0}/{1}:".format(count, len(ids)), suffix="complete"
                )
    def update_many_ids(self, ids):
        """
        Rebuild many records, batched (--batch) or one at a time.
        :return: 0 on success, 1 when ids is empty.
        """
        if not ids:
            _logger.warning('No records found in batch, skipping.')
            return 1
        _logger.info('Processing user event metrics batch...')
        if self.args.batch:
            self.update_batch(ids)
            # Bug fix: this message previously said "retention eligible metrics" (copy-paste
            # from the RetentionEligibleMetricClass sibling).
            _logger.info('Processing user event metrics batch complete.')
            return 0
        total_ids = len(ids)
        count = 0
        errors = 0
        for id_ in ids:
            count += 1
            if self.update_single_id(id_) != 0:
                errors += 1
                if self.args.debug:
                    _logger.error(f'ID {id_} not found.')
            if not self.args.debug:
                print_progress_bar(
                    count, total_ids, prefix="{0}/{1}:".format(count, total_ids), suffix="complete"
                )
        if errors > 0:
            _logger.warning(f'\n\nThere were {errors} IDs not found during processing.')
        return 0
    def run(self):
        """ Main entry point: rebuild one id (--id), all ids (--all-ids), or an id list. """
        clr = self.gcp_env.terminal_colors
        if not self.args.id and not self.args.all_ids and not self.id_list:
            _logger.error('Nothing to do')
            return 1
        self.gcp_env.activate_sql_proxy()
        _logger.info('')
        _logger.info(clr.fmt('\nRebuild User Event Metric Records for PDR:', clr.custom_fg_color(156)))
        _logger.info('')
        _logger.info('=' * 90)
        _logger.info(' Target Project : {0}'.format(clr.fmt(self.gcp_env.project)))
        if self.args.all_ids:
            dao = ResourceDataDao()
            _logger.info(' Rebuild All Records : {0}'.format(clr.fmt('Yes')))
            _logger.info('=' * 90)
            with dao.session() as session:
                ids = session.query(UserEventMetrics.id).all()
                self.update_many_ids(ids)
        elif self.args.id:
            _logger.info(' Primary Key ID : {0}'.format(clr.fmt(f'{self.args.id}')))
            _logger.info('=' * 90)
            self.update_single_id(self.args.id)
        elif self.id_list:
            _logger.info(' Total Records : {0}'.format(clr.fmt(len(self.id_list))))
            _logger.info('=' * 90)
            if len(self.id_list):
                self.update_many_ids(self.id_list)
        # NOTE(review): returning 1 matches the sibling tool classes' convention — confirm intended exit code.
        return 1
def get_id_list(fname):
    """
    Shared helper routine for tool classes that allow input from a file of integer ids (participant ids or
    id values from a specific table).
    :param fname: The filename passed with the --from-file argument
    :return: A list of integers, or None on missing/empty fname
    """
    filename = os.path.expanduser(fname)
    if not os.path.exists(filename):
        _logger.error(f"File '{fname}' not found.")
        return None
    # Read ids from file; `with` closes the handle (the previous implementation leaked it).
    with open(filename) as handle:
        lines = handle.readlines()
    # Convert ids from a list of strings to a list of integers, skipping blank lines.
    ids = [int(i) for i in lines if i.strip()]
    return ids if ids else None
def run():
    """Tool entry point: configure logging, build the argparse command tree, then
    dispatch to the resource-rebuild class matching the chosen sub-command.

    :return: process exit code (0 on success, non-zero on error).
    """
    # Set global debug value and setup application logging.
    setup_logging(
        _logger, tool_cmd, "--debug" in sys.argv, "{0}.log".format(tool_cmd) if "--log-file" in sys.argv else None
    )
    setup_i18n()
    # Setup program arguments.
    parser = argparse.ArgumentParser(prog=tool_cmd, description=tool_desc)
    parser.add_argument("--debug", help="enable debug output", default=False, action="store_true")  # noqa
    parser.add_argument("--log-file", help="write output to a log file", default=False, action="store_true")  # noqa
    parser.add_argument("--project", help="gcp project name", default="localhost")  # noqa
    parser.add_argument("--account", help="pmi-ops account", default=None)  # noqa
    parser.add_argument("--service-account", help="gcp iam service account", default=None)  # noqa
    # The "dest" add_subparsers() argument specifies the property name in the args object where the
    # sub-parser used in the command line will be stored.  IE: if args.resource == 'participant'...
    subparser = parser.add_subparsers(title='resource types', dest='resource',
                                      help='specific resource type to work with')
    # Common individual arguments that may be used in multiple subparsers.  The Help text and Choices can
    # be overridden by calling update_argument() after the subparser has been created.
    # Each of these is a parent parser holding exactly one shared option.
    pid_parser = argparse.ArgumentParser(add_help=False)
    pid_parser.add_argument("--pid", help="rebuild single participant id", type=int, default=None)
    all_pids_parser = argparse.ArgumentParser(add_help=False)
    all_pids_parser.add_argument("--all-pids", help="rebuild all participants", default=False, action="store_true")
    id_parser = argparse.ArgumentParser(add_help=False)
    id_parser.add_argument("--id", help="rebuild single genomic table id", type=int, default=None)
    all_ids_parser = argparse.ArgumentParser(add_help=False)
    all_ids_parser.add_argument("--all-ids", help="rebuild all records", default=False, action="store_true")
    from_file_parser = argparse.ArgumentParser(add_help=False)
    from_file_parser.add_argument("--from-file", help="rebuild resource ids from a file with a list of ids",
                                  metavar='FILE', type=str, default=None)
    table_parser = argparse.ArgumentParser(add_help=False)
    table_parser.add_argument("--table", help="research workbench db table name to rebuild from", type=str,
                              metavar='TABLE')
    all_tables_parser = argparse.ArgumentParser(add_help=False)
    all_tables_parser.add_argument("--all-tables", help="rebuild all records from all tables", default=False,
                                   action="store_true")
    batch_parser = argparse.ArgumentParser(add_help=False)
    batch_parser.add_argument("--batch", help="submit resource ids in batches to Cloud Tasks", default=False,
                              action="store_true")
    # End common subparser arguments.
    def update_argument(p, dest, help=None):  # pylint: disable=redefined-builtin
        """
        Update sub-parser argument description and choices.
        :param p: sub-parser whose registered argument should be updated.
        :param dest: Destination property where argument value is stored. IE: 'file_name' == args.file_name.
        :param help: replacement help text for the matched argument.
        """
        if not p or not dest:
            raise ValueError('Arguments must include a sub-parser and dest string.')
        # `_actions` is argparse's private registry of added arguments; we patch help in place.
        for a in p._actions:
            if a.dest == dest:
                a.help = help
    def argument_conflict(args_, ids_, choices=()):
        """ Check if common arguments conflict """
        if args_.table and args_.all_tables:
            _logger.error("Arguments 'table' and 'all-tables' conflict.")
            return True
        elif args_.all_tables and args_.from_file:
            _logger.error("Argument 'from-file' cannot be used with 'all-tables', only with 'table'")
            return True
        elif args_.id and ids_:
            _logger.error("Argument 'from-file' cannot be used if a single 'id' was also specified")
            return True
        elif ids_ and not args_.table:
            _logger.error("Argument 'from-file' was provided without a specified 'table' ")
            return True
        if args_.table and args_.table not in choices:
            _logger.error(f"Argument 'table' value '{args_.table}' is invalid, possible values are:\n {choices}.")
            return True
        return False
    # Rebuild participant resources
    rebuild_parser = subparser.add_parser(
        "participant",
        parents=[from_file_parser, batch_parser, pid_parser, all_pids_parser])
    rebuild_parser.add_argument("--no-modules", default=False, action="store_true",
                                help="do not rebuild participant questionnaire response data for pdr_mod_* tables")
    rebuild_parser.add_argument("--modules-only", default=False, action="store_true",
                                help="only rebuild participant questionnaire response data for pdr_mod_* tables")
    rebuild_parser.add_argument("--qc", default=False, action="store_true",
                                help="Goal 1 quality control to compare RDR and PDR enrollment status values")
    update_argument(rebuild_parser, dest='from_file',
                    help="rebuild participant ids from a file with a list of pids")
    # Rebuild the code table ids
    code_parser = subparser.add_parser(
        "code",
        parents=[all_ids_parser]
    )
    code_parser.add_argument("--id", help="rebuild single code id", type=int, default=None)
    update_argument(code_parser, dest='all_ids', help='rebuild all ids from the code table (default)')
    # Rebuild genomic resources.
    genomic_parser = subparser.add_parser(
        "genomic",
        parents=[id_parser, all_ids_parser, table_parser, all_tables_parser, from_file_parser, batch_parser])
    update_argument(genomic_parser, 'table', help="genomic db table name to rebuild from")
    genomic_parser.epilog = f'Possible TABLE Values: {{{",".join(GENOMIC_DB_TABLES)}}}.'
    # Rebuild EHR receipt resources.
    ehr_parser = subparser.add_parser('ehr-receipt', parents=[batch_parser, from_file_parser])
    ehr_parser.add_argument("--ehr", help="Submit batch to Cloud Tasks", default=False,
                            action="store_true")  # noqa
    update_argument(ehr_parser, dest='from_file',
                    help="rebuild EHR info for specific participant ids read from a file with a list of pids")
    # Rebuild Research Workbench resources.
    rw_parser = subparser.add_parser(
        "research-workbench",
        parents=[batch_parser, id_parser, all_ids_parser, table_parser, all_tables_parser, from_file_parser])
    update_argument(rw_parser, 'table', help="research workbench db table name to rebuild from")
    rw_parser.epilog = f'Possible TABLE Values: {{{",".join(RESEARCH_WORKBENCH_TABLES)}}}.'
    # Rebuild hpo/site/organization tables.  Specify a single table name or all-tables
    site_parser = subparser.add_parser(
        "site-tables",
        parents=[table_parser, all_tables_parser, id_parser]
    )
    update_argument(site_parser, 'table', help='db table name to rebuild from. All ids will be rebuilt')
    site_parser.epilog = f'Possible TABLE values: {{{",".join(SITE_TABLES)}}}.'
    # Rebuild Retention Eligibility resources
    retention_parser = subparser.add_parser(
        'retention', parents=[batch_parser, from_file_parser, pid_parser, all_pids_parser])
    update_argument(retention_parser, dest='from_file',
                    help="rebuild retention eligibility records for specific pids read from a file.")
    # Rebuild Consent Validation Metrics resources
    consent_metrics_parser = subparser.add_parser('consent-metrics', parents=[batch_parser,
                                                                              from_file_parser,
                                                                              all_ids_parser,
                                                                              id_parser])
    consent_metrics_parser.add_argument("--modified-since",
                                        dest='modified_since',
                                        type=lambda s: datetime.datetime.strptime(s, '%Y-%m-%d'),
                                        help="Modified date in in YYYY-MM-DD format"
                                        )
    consent_metrics_parser.add_argument("--to-file", dest="to_file", type=str, default=None,
                                        help="TSV file to save generated records to instead of saving to database"
                                        )
    update_argument(consent_metrics_parser, dest='from_file',
                    help="rebuild consent metrics data for specific consent_file ids read from a file")
    update_argument(consent_metrics_parser, dest='all_ids',
                    help="rebuild metrics records for all consent_file ids")
    update_argument(consent_metrics_parser, dest='id', help="rebuild metrics for a specific consent_file id record")
    # Rebuild Color user event metrics resources
    user_event_metrics_parser = subparser.add_parser('user-event-metrics', parents=[batch_parser,
                                                                                    from_file_parser,
                                                                                    all_ids_parser,
                                                                                    id_parser])
    update_argument(user_event_metrics_parser, dest='from_file',
                    help="rebuild user event metrics data for specific ids read from a file")
    update_argument(user_event_metrics_parser, dest='all_ids',
                    help="rebuild user event metrics records for all ids")
    update_argument(user_event_metrics_parser, dest='id', help="rebuild user event metrics for a specific id record")
    # Perform cleanup of PDR data records orphaned because of related RDR data table backfills/cleanup
    clean_pdr_data_parser = subparser.add_parser('clean-pdr-data', parents=[from_file_parser, id_parser])
    update_argument(clean_pdr_data_parser, dest='id',
                    help="The id (pk_id for bigquery_sync, resource_pk_id for resource_data) of a record to delete"
                    )
    update_argument(clean_pdr_data_parser, dest='from_file',
                    help="file with PDR data record pk_ids (bigquery_sync) or resource_pk_ids (resource_data) to delete"
                    )
    clean_pdr_data_parser.add_argument('--bq-table-id', type=str, default=None,
                                       help='table_id value whose bigquery_sync records should be cleaned')
    clean_pdr_data_parser.add_argument('--resource-type-id', dest='resource_type_id', type=int, default=None,
                                       help='resource_type_id whose resource_data records should be cleaned')
    clean_pdr_data_parser.add_argument('--pdr-mod-responses', default=False, action='store_true',
                                       help="clean all pdr_mod_* tables based on questionnaire_response_id list")
    args = parser.parse_args()
    with GCPProcessContext(tool_cmd, args.project, args.account, args.service_account) as gcp_env:
        # Not every sub-parser registers --from-file, hence the hasattr() guard.
        ids = None
        if hasattr(args, 'from_file') and args.from_file:
            ids = get_id_list(args.from_file)
        # Rebuild participant resources
        if args.resource == 'participant':
            process = ParticipantResourceClass(args, gcp_env, ids)
            exit_code = process.run()
        # Rebuild genomic resources.
        elif args.resource == 'genomic':
            if argument_conflict(args, ids, choices=GENOMIC_DB_TABLES):
                sys.exit(1)
            process = GenomicResourceClass(args, gcp_env, ids)
            exit_code = process.run()
        # Rebuild EHR receipt resources.
        elif args.resource == 'ehr-receipt':
            process = EHRReceiptClass(args, gcp_env, ids)
            exit_code = process.run()
        # Rebuild Research Workbench resources.
        elif args.resource == 'research-workbench':
            if argument_conflict(args, ids, choices=RESEARCH_WORKBENCH_TABLES):
                sys.exit(1)
            process = ResearchWorkbenchResourceClass(args, gcp_env, ids)
            exit_code = process.run()
        elif args.resource == 'code':
            process = CodeResourceClass(args, gcp_env)
            exit_code = process.run()
        elif args.resource == 'site-tables':
            process = SiteResourceClass(args, gcp_env)
            exit_code = process.run()
        elif args.resource == 'retention':
            process = RetentionEligibleMetricClass(args, gcp_env, ids)
            exit_code = process.run()
        elif args.resource == 'consent-metrics':
            process = ConsentMetricClass(args, gcp_env, ids)
            exit_code = process.run()
        elif args.resource == 'user-event-metrics':
            process = UserEventMetricsClass(args, gcp_env, ids)
            exit_code = process.run()
        elif args.resource == 'clean-pdr-data':
            process = CleanPDRDataClass(args, gcp_env, ids)
            exit_code = process.run()
        else:
            # No (or unknown) sub-command given.
            _logger.info('Please select an option to run. For help use "[resource] --help".')
            exit_code = 1
        return exit_code
# --- Main Program Call ---
if __name__ == "__main__":
    # Propagate the tool's exit code to the shell.
    sys.exit(run())
| bsd-3-clause | f2c526561a3ab4bef5006411fcf6c25d | 41.948395 | 120 | 0.564012 | 3.927092 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/endpoint.py | 1 | 4637 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Endpoint) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class Endpoint(domainresource.DomainResource):
    """ The technical details of an endpoint that can be used for electronic
    services.

    The technical details of an endpoint that can be used for electronic
    services, such as for web services providing XDS.b or a REST endpoint for
    another FHIR server. This may include any security context information.
    """

    resource_type = "Endpoint"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Simple (string / list-of-string) fields.
        self.address = None          # The technical base address for connecting to this endpoint (str).
        self.name = None             # A name that this endpoint can be identified by (str).
        self.status = None           # active | suspended | error | off | entered-in-error | test (str).
        self.header = None           # Usage depends on the channel type (list of str).
        self.payloadMimeType = None  # Mimetype to send; unset means content could be anything (list of str).
        # Complex fields, each represented as `dict` in JSON.
        self.connectionType = None        # Protocol/Profile/Standard for this connection (Coding).
        self.contact = None               # Contact details for source, e.g. troubleshooting (list of ContactPoint).
        self.identifier = None            # Identifies this endpoint across multiple systems (list of Identifier).
        self.managingOrganization = None  # Organization managing (not necessarily exposing) this endpoint
                                          # (FHIRReference to Organization).
        self.payloadType = None           # Content types usable at this endpoint (list of CodeableConcept).
        self.period = None                # Interval the endpoint is expected to be operational (Period).
        super(Endpoint, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples are (name, json_name, type, is_list, of_many, not_optional).
        own_properties = [
            ("address", "address", str, False, None, True),
            ("connectionType", "connectionType", coding.Coding, False, None, True),
            ("contact", "contact", contactpoint.ContactPoint, True, None, False),
            ("header", "header", str, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("managingOrganization", "managingOrganization", fhirreference.FHIRReference, False, None, False),
            ("name", "name", str, False, None, False),
            ("payloadMimeType", "payloadMimeType", str, True, None, False),
            ("payloadType", "payloadType", codeableconcept.CodeableConcept, True, None, True),
            ("period", "period", period.Period, False, None, False),
            ("status", "status", str, False, None, True),
        ]
        js = super(Endpoint, self).elementProperties()
        js.extend(own_properties)
        return js
import sys
# Sibling model modules are imported lazily here (after the class definition) to avoid
# circular-import problems; if the relative import fails because the module is already
# being loaded, fall back to the copy registered in sys.modules under its package name.
try:
    from . import codeableconcept
except ImportError:
    codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
    from . import coding
except ImportError:
    coding = sys.modules[__package__ + '.coding']
try:
    from . import contactpoint
except ImportError:
    contactpoint = sys.modules[__package__ + '.contactpoint']
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']
try:
    from . import identifier
except ImportError:
    identifier = sys.modules[__package__ + '.identifier']
try:
    from . import period
except ImportError:
    period = sys.modules[__package__ + '.period']
| bsd-3-clause | d845b88ee118540fc6aab5492b99080a | 37.641667 | 110 | 0.621954 | 4.378659 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/da96192073f0_modify_genomic_set_member_after_e2e.py | 1 | 3350 | """modify genomic_set_member after e2e
Revision ID: da96192073f0
Revises: b0520bacfbd0
Create Date: 2020-06-25 12:15:13.956804
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'da96192073f0'        # this migration's unique id
down_revision = 'b0520bacfbd0'   # parent revision in the migration chain
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, e.g. upgrade_rdr()."""
    globals()[f"upgrade_{engine_name}"]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. downgrade_rdr()."""
    globals()[f"downgrade_{engine_name}"]()
def upgrade_rdr():
    """Apply the post-E2E genomic_set_member schema changes to the rdr database."""
    # ### commands auto generated by Alembic - please adjust! ###
    tables = ('genomic_set_member', 'genomic_set_member_history')
    # Add the new nullable string columns to the live table and its history shadow.
    for column_name, width in (('collection_tube_id', 80), ('gc_site_id', 11)):
        for table_name in tables:
            op.add_column(table_name, sa.Column(column_name, sa.String(length=width), nullable=True))
    # The foreign key must be removed before its backing column can be dropped.
    op.drop_constraint('genomic_set_member_ibfk_14', 'genomic_set_member', type_='foreignkey')
    for column_name in ('gem_ptsc_sent_job_run_id', 'consent_for_ror', 'withdrawn_status'):
        for table_name in tables:
            op.drop_column(table_name, column_name)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Revert the post-E2E genomic_set_member schema changes on the rdr database."""
    # ### commands auto generated by Alembic - please adjust! ###
    tables = ('genomic_set_member', 'genomic_set_member_history')

    def _restore(column_factory):
        # Add the same column (fresh sa.Column per call) to both tables.
        for table_name in tables:
            op.add_column(table_name, column_factory())

    _restore(lambda: sa.Column('withdrawn_status', mysql.INTEGER(display_width=11),
                               autoincrement=False, nullable=True))
    _restore(lambda: sa.Column('consent_for_ror', mysql.VARCHAR(length=10), nullable=True))
    _restore(lambda: sa.Column('gem_ptsc_sent_job_run_id', mysql.INTEGER(display_width=11),
                               autoincrement=False, nullable=True))
    # Re-create the FK after its backing column exists again.
    op.create_foreign_key('genomic_set_member_ibfk_14', 'genomic_set_member', 'genomic_job_run',
                          ['gem_ptsc_sent_job_run_id'], ['id'])
    for column_name in ('gc_site_id', 'collection_tube_id'):
        for table_name in tables:
            op.drop_column(table_name, column_name)
    # ### end Alembic commands ###
def upgrade_metrics():
    """This revision makes no changes to the metrics database."""
def downgrade_metrics():
    """This revision has nothing to revert on the metrics database."""
| bsd-3-clause | 5d886fed3a951402072f1de85853344d | 38.880952 | 134 | 0.664478 | 3.163362 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/code_constants.py | 1 | 11043 | """Constants for code values for questions and modules and their mappings to fields on
participant summaries and metrics."""
# Code system URI for PPI questionnaire codes consumed by RDR.
PPI_SYSTEM = "http://terminology.pmi-ops.org/CodeSystem/ppi"
# System for codes that are used in questionnaires but we don't need for analysis purposes;
# these codes are ignored by RDR.
PPI_EXTRA_SYSTEM = "http://terminology.pmi-ops.org/CodeSystem/ppi-extra"
MEASUREMENT_SYS = 'http://terminology.pmi-ops.org/CodeSystem/physical-measurements'
# Identifier systems used to tag site ids and usernames on incoming records.
SITE_ID_SYSTEM = "https://www.pmi-ops.org/site-id"
QUEST_SITE_ID_SYSTEM = "https://www.pmi-ops.org/quest-site"
HEALTHPRO_USERNAME_SYSTEM = "https://www.pmi-ops.org/healthpro-username"
QUEST_USERNAME_SYSTEM = "https://www.pmi-ops.org/quest-user"
KIT_ID_SYSTEM = "https://orders.mayomedicallaboratories.com/kit-id"
# Origin value for biobank orders created through CareEvolution (Quest).
QUEST_BIOBANK_ORDER_ORIGIN = 'careevolution'
# Question codes: participant name and contact information (PII).
FIRST_NAME_QUESTION_CODE = "PIIName_First"
LAST_NAME_QUESTION_CODE = "PIIName_Last"
MIDDLE_NAME_QUESTION_CODE = "PIIName_Middle"
ZIPCODE_QUESTION_CODE = "StreetAddress_PIIZIP"
STATE_QUESTION_CODE = "StreetAddress_PIIState"
STREET_ADDRESS_QUESTION_CODE = "PIIAddress_StreetAddress"
STREET_ADDRESS2_QUESTION_CODE = "PIIAddress_StreetAddress2"
CITY_QUESTION_CODE = "StreetAddress_PIICity"
EMPLOYMENT_ZIPCODE_QUESTION_CODE = "EmploymentWorkAddress_ZipCode"
PHONE_NUMBER_QUESTION_CODE = "PIIContactInformation_Phone"
LOGIN_PHONE_NUMBER_QUESTION_CODE = "ConsentPII_VerifiedPrimaryPhoneNumber"
EMAIL_QUESTION_CODE = "ConsentPII_EmailAddress"
RECONTACT_METHOD_QUESTION_CODE = "PIIContactInformation_RecontactMethod"
LANGUAGE_QUESTION_CODE = "Language_SpokenWrittenLanguage"
# Question codes: demographics.
SEX_QUESTION_CODE = "BiologicalSexAtBirth_SexAtBirth"
SEXUAL_ORIENTATION_QUESTION_CODE = "TheBasics_SexualOrientation"
EDUCATION_QUESTION_CODE = "EducationLevel_HighestGrade"
INCOME_QUESTION_CODE = "Income_AnnualIncome"
# Question codes: consent questionnaires.
EHR_CONSENT_QUESTION_CODE = "EHRConsentPII_ConsentPermission"
EHR_SENSITIVE_CONSENT_QUESTION_CODE = "ehrconsentpii_sensitivetype2"
EHR_CONSENT_EXPIRED_QUESTION_CODE = "EHRConsentPII_ConsentExpired"
DVEHR_SHARING_QUESTION_CODE = "DVEHRSharing_AreYouInterested"
CABOR_SIGNATURE_QUESTION_CODE = "ExtraConsent_CABoRSignature"
GROR_CONSENT_QUESTION_CODE = "ResultsConsent_CheckDNA"
COPE_CONSENT_QUESTION_CODE = "section_participation"
WEAR_CONSENT_QUESTION_CODE = "resultsconsent_wear"
PRIMARY_CONSENT_UPDATE_QUESTION_CODE = "Reconsent_ReviewConsentAgree"
VA_EHR_RECONSENT_QUESTION_CODE = "vaehrreconsent_agree"
VA_PRIMARY_RECONSENT_C1_C2_QUESTION = 'vaprimaryreconsent_c1_2_agree'
VA_PRIMARY_RECONSENT_C3_QUESTION = 'vaprimaryreconsent_c3_agree'
NON_VA_PRIMARY_RECONSENT_QUESTION = 'nonvaprimaryreconsent_agree'
DATE_OF_BIRTH_QUESTION_CODE = "PIIBirthInformation_BirthDate"
GENDER_IDENTITY_QUESTION_CODE = "Gender_GenderIdentity"
RACE_QUESTION_CODE = "Race_WhatRaceEthnicity"
# DA-2419: TheBasics possible partial survey content for profile/secondary contact updates.
# Responses containing only these question codes are treated as profile updates rather
# than full TheBasics submissions.
BASICS_PROFILE_UPDATE_QUESTION_CODES = [
    'PersonOneAddress_PersonOneAddressCity',
    'PersonOneAddress_PersonOneAddressState',
    'PersonOneAddress_PersonOneAddressZipCode',
    'SecondaryContactInfo_PersonOneEmail',
    'SecondaryContactInfo_PersonOneFirstName',
    'SecondaryContactInfo_PersonOneMiddleInitial',
    'SecondaryContactInfo_PersonOneLastName',
    'SecondaryContactInfo_PersonOneAddressOne',
    'SecondaryContactInfo_PersonOneAddressTwo',
    'SecondaryContactInfo_PersonOneTelephone',
    'SecondaryContactInfo_PersonOneRelationship',
    'SecondaryContactInfo_SecondContactsFirstName',
    'SecondaryContactInfo_SecondContactsMiddleInitial',
    'SecondaryContactInfo_SecondContactsLastName',
    'SecondaryContactInfo_SecondContactsAddressOne',
    'SecondaryContactInfo_SecondContactsAddressTwo',
    'SecondContactsAddress_SecondContactCity',
    'SecondContactsAddress_SecondContactZipCode',
    'SecondaryContactInfo_SecondContactsEmail',
    'SecondaryContactInfo_SecondContactsNumber',
    'SecondContactsAddress_SecondContactState',
    'SecondaryContactInfo_SecondContactsRelationship',
    'SocialSecurity_SocialSecurityNumber'
]
# General PMI answer codes
PMI_SKIP_CODE = "PMI_Skip"
PMI_PREFER_NOT_TO_ANSWER_CODE = "PMI_PreferNotToAnswer"
PMI_OTHER_CODE = "PMI_Other"
PMI_FREE_TEXT_CODE = "PMI_FreeText"
PMI_UNANSWERED_CODE = "PMI_Unanswered"
PMI_YES = "pmi_yes"
# Gender answer codes. 'GenderIdentity_MoreThanOne' is also an option, set in participant enums.
GENDER_MAN_CODE = "GenderIdentity_Man"
GENDER_WOMAN_CODE = "GenderIdentity_Woman"
GENDER_NONBINARY_CODE = "GenderIdentity_NonBinary"
GENDER_TRANSGENDER_CODE = "GenderIdentity_Transgender"
GENDER_OTHER_CODE = "GenderIdentity_AdditionalOptions"
GENDER_PREFER_NOT_TO_ANSWER_CODE = "PMI_PreferNotToAnswer"
GENDER_NO_GENDER_IDENTITY_CODE = "PMI_Skip"
# Race answer codes
RACE_AIAN_CODE = "WhatRaceEthnicity_AIAN"
RACE_ASIAN_CODE = "WhatRaceEthnicity_Asian"
RACE_BLACK_CODE = "WhatRaceEthnicity_Black"
RACE_MENA_CODE = "WhatRaceEthnicity_MENA"
RACE_NHDPI_CODE = "WhatRaceEthnicity_NHPI"
RACE_WHITE_CODE = "WhatRaceEthnicity_White"
RACE_HISPANIC_CODE = "WhatRaceEthnicity_Hispanic"
RACE_FREETEXT_CODE = "WhatRaceEthnicity_FreeText"
RACE_NONE_OF_THESE_CODE = "WhatRaceEthnicity_RaceEthnicityNoneOfThese"
# Withdrawal ceremony question/answers (AIAN participants).
WITHDRAWAL_CEREMONY_QUESTION_CODE = "withdrawalaianceremony"
WITHDRAWAL_CEREMONY_YES = "withdrawalaianceremony_yes"
WITHDRAWAL_CEREMONY_NO = "withdrawalaianceremony_no"
# Consent answer codes
CONSENT_PERMISSION_YES_CODE = "ConsentPermission_Yes"
CONSENT_PERMISSION_NO_CODE = "ConsentPermission_No"
CONSENT_PERMISSION_NOT_SURE = "ConsentPermission_NotSure"
EHR_CONSENT_EXPIRED_YES = "EHRConsentPII_ConsentExpired_Yes"
SENSITIVE_EHR_YES = "sensitivetype2__agree"
# Consent GROR Answer Codes
CONSENT_GROR_YES_CODE = "CheckDNA_Yes"
CONSENT_GROR_NO_CODE = "CheckDNA_No"
CONSENT_GROR_NOT_SURE = "CheckDNA_NotSure"
# Reconsent Answer Codes
COHORT_1_REVIEW_CONSENT_YES_CODE = "ReviewConsentAgree_Yes"
COHORT_1_REVIEW_CONSENT_NO_CODE = "ReviewConsentAgree_No"
WEAR_YES_ANSWER_CODE = "wear_yes"
# Cohort Group Code
CONSENT_COHORT_GROUP_CODE = "ConsentPII_CohortGroup"
# Consent COPE Answer Codes. (Deferred = expressed interest in taking the survey later)
CONSENT_COPE_YES_CODE = "COPE_A_44"
CONSENT_COPE_NO_CODE = "COPE_A_13"
CONSENT_COPE_DEFERRED_CODE = "COPE_A_231"
# COPE vaccine dose question/answer codes (first and second dose details).
COPE_DOSE_RECEIVED_QUESTION = 'cdc_covid_xx'
COPE_NUMBER_DOSES_QUESTION = 'cdc_covid_xx_a'
COPE_ONE_DOSE_ANSWER = 'cope_a_332'
COPE_TWO_DOSE_ANSWER = 'cope_a_333'
COPE_DOSE_TYPE_QUESTION = 'cdc_covid_xx_b'
COPE_FIRST_DOSE_QUESTION = 'cdc_covid_xx_firstdose'
COPE_FIRST_DOSE_DATE_QUESTION = 'cdc_covid_xx_a_date1'
COPE_FIRST_DOSE_TYPE_QUESTION = 'cdc_covid_xx_b_firstdose'
COPE_FIRST_DOSE_TYPE_OTHER_QUESTION = 'cdc_covid_xx_b_firstdose_other'
COPE_FIRST_DOSE_SYMPTOM_QUESTION = 'cdc_covid_xx_symptom'
COPE_FIRST_DOSE_SYMPTOM_OTHER_QUESTION = 'cdc_covid_xx_symptom_cope_350'
COPE_SECOND_DOSE_QUESTION = 'cdc_covid_xx_seconddose'
COPE_SECOND_DOSE_DATE_QUESTION = 'cdc_covid_xx_a_date2'
COPE_SECOND_DOSE_TYPE_QUESTION = 'cdc_covid_xx_b_seconddose'
COPE_SECOND_DOSE_TYPE_OTHER_QUESTION = 'cdc_covid_xx_b_seconddose_other'
COPE_SECOND_DOSE_SYMPTOM_QUESTION = 'cdc_covid_xx_symptom_seconddose'
COPE_SECOND_DOSE_SYMPTOM_OTHER_QUESTION = 'cdc_covid_xx_symptom_seconddose_cope_350'
# COPE Minute Survey Codes
COPE_VACCINE_MINUTE_1_MODULE_CODE = "cope_vaccine1"
COPE_VACCINE_MINUTE_2_MODULE_CODE = "cope_vaccine2"
COPE_VACCINE_MINUTE_3_MODULE_CODE = "cope_vaccine3"
COPE_VACCINE_MINUTE_4_MODULE_CODE = "cope_vaccine4"
# Module names for questionnaires / consent forms
CONSENT_FOR_GENOMICS_ROR_MODULE = "GROR"
CONSENT_FOR_STUDY_ENROLLMENT_MODULE = "ConsentPII"
CONSENT_FOR_ELECTRONIC_HEALTH_RECORDS_MODULE = "EHRConsentPII"
CONSENT_FOR_DVEHR_MODULE = "DVEHRSharing"
OVERALL_HEALTH_PPI_MODULE = "OverallHealth"
LIFESTYLE_PPI_MODULE = "Lifestyle"
THE_BASICS_PPI_MODULE = "TheBasics"
FAMILY_HISTORY_MODULE = "FamilyHistory"
SOCIAL_DETERMINANTS_OF_HEALTH_MODULE = "sdoh"
PERSONAL_AND_FAMILY_HEALTH_HISTORY_MODULE = 'personalfamilyhistory'
PERSONAL_MEDICAL_HISTORY_MODULE = "PersonalMedicalHistory"
MEDICATIONS_MODULE = "MedicationsPPI"
REMOTE_PM_MODULE = 'pm_height_weight'
REMOTE_PM_UNIT = 'measurement_unit'
HEALTHCARE_ACCESS_MODULE = "HealthcareAccess"
# COVID Experience surveys:
# The COPE module covers the May/June/July (2020) COPE Survey questionnaires
# A new survey was developed for November 2020
COPE_MODULE = 'COPE'
COPE_NOV_MODULE = 'cope_nov'
COPE_DEC_MODULE = "cope_dec"
COPE_FEB_MODULE = "cope_feb"
GENETIC_ANCESTRY_MODULE = 'GeneticAncestry'
LIFE_FUNCTIONING_SURVEY = 'lfs'
PRIMARY_CONSENT_UPDATE_MODULE = 'PrimaryConsentUpdate'
VA_EHR_RECONSENT = 'vaehrreconsent'
# ConsentPII Questions
RECEIVE_CARE_STATE = "ReceiveCare_PIIState"
# ConsentPII Answers
OR_CARE_STATE = "PIIStateCare_OR"
TX_CARE_STATE = "PIIStateCare_TX"
AK_CARE_STATE = "PIIStateCare_AK"
MA_CARE_STATE = "PIIStateCare_MA"
MI_CARE_STATE = "PIIStateCare_MI"
OR_STATE = "PIIState_OR"
TX_STATE = "PIIState_TX"
AK_STATE = "PIIState_AK"
MA_STATE = "PIIState_MA"
MI_STATE = "PIIState_MI"
# DVEHR ANSWERS
DVEHRSHARING_CONSENT_CODE_YES = "DVEHRSharing_Yes"
DVEHRSHARING_CONSENT_CODE_NO = "DVEHRSharing_No"
DVEHRSHARING_CONSENT_CODE_NOT_SURE = "DVEHRSharing_NotSure"
# Genetic Ancestry Consent Answers
GENETIC_ANCESTRY_CONSENT_CODE_YES = "ConsentAncestryTraits_Yes"
GENETIC_ANCESTRY_CONSENT_CODE_NO = "ConsentAncestryTraits_No"
GENETIC_ANCESTRY_CONSENT_CODE_NOT_SURE = "ConsentAncestryTraits_NotSure"
# Digital Health Share Consents
APPLE_EHR_SHARING_MODULE = "participantintendstoshareappleehr"
APPLE_EHR_STOP_SHARING_MODULE = "participantintendstostopsharingappleehr"
APPLE_HEALTH_KIT_SHARING_MODULE = "participantintendstoshareapplehealthkit"
APPLE_HEALTH_KIT_STOP_SHARING_MODULE = "participantintendstostopsharingapplehealthkit"
FITBIT_SHARING_MODULE = "participantintendstosharefitbit"
FITBIT_STOP_SHARING_MODULE = "participantintendstostopsharingfitbit"
# General response answer codes
AGREE_YES = "agree_yes"
AGREE_NO = "agree_no"
# Biobank sample test codes accepted on biobank orders.
BIOBANK_TESTS = [
    "1ED10",
    "2ED10",
    "1ED04",
    "1SST8",
    "1SS08",
    "1PST8",
    "1PS08",
    "2SST8",
    "2PST8",
    "1HEP4",
    "1UR10",
    "1UR90",
    "1SAL2",
    "1SAL",
    "1ED02",
    "1CFD9",
    "1PXR2",
]
# Frozen set form for fast membership checks.
BIOBANK_TESTS_SET = frozenset(BIOBANK_TESTS)
UNSET = "UNSET"
UNMAPPED = "UNMAPPED"
BASE_VALUES = [UNSET, UNMAPPED, PMI_SKIP_CODE]
# English and Spanish are the only accepted languages for now
LANGUAGE_OF_CONSENT = ["en", "es"]
# genome type values
GENOME_TYPE = ["aou_array", "aou_wgs"]
# Genome center site ids.
GC_SITE_IDs = ['rdr', 'bcm', 'jh', 'bi', 'uw']
AW1_BUCKETS = [
    'prod-genomics-baylor',
    'prod-genomics-broad',
    'prod-genomics-northwest'
]
AW2_BUCKETS = [
    'prod-genomics-data-baylor',
    'prod-genomics-data-broad',
    'prod-genomics-data-northwest'
]
# Source of a created participant
ORIGINATING_SOURCES = ['vibrent', 'careevolution', 'example']
# States whose residents/care recipients require the sensitive EHR consent flow.
SENSITIVE_EHR_STATES = [
    OR_CARE_STATE,
    TX_CARE_STATE,
    AK_CARE_STATE,
    MA_CARE_STATE,
    MI_CARE_STATE,
    OR_STATE,
    TX_STATE,
    AK_STATE,
    MA_STATE,
    MI_STATE
]
| bsd-3-clause | f356aeba9c7551ec522c2f3a94a33d40 | 37.611888 | 96 | 0.774427 | 2.574726 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/genomic/genomic_mappings.py | 1 | 12678 | """
This module provides central location for all genomics_mappings
"""
from rdr_service.genomic_enums import GenomicReportState
# Manifest file-name prefix ("GEN" for arrays, "SEQ" for WGS) keyed by genome
# type; the investigation genome types share the standard prefixes.
genome_type_to_aw1_aw2_file_prefix = {
    "aou_array": "GEN",
    "aou_wgs": "SEQ",
    "aou_array_investigation": "GEN",
    "aou_wgs_investigation": "SEQ"
}

# Raw AW1 manifest column names (snake_case) -> GenomicSetMember model
# attribute names (camelCase).
raw_aw1_to_genomic_set_member_fields = {
    "package_id": "packageId",
    "box_storageunit_id": "gcManifestBoxStorageUnitId",
    "box_id_plate_id": "gcManifestBoxPlateId",
    "well_position": "gcManifestWellPosition",
    "sample_id": "sampleId",
    "parent_sample_id": "gcManifestParentSampleId",
    "collection_tube_id": "collectionTubeId",
    "matrix_id": "gcManifestMatrixId",
    "sample_type": "sampleType",
    "treatments": "gcManifestTreatments",
    "quantity": "gcManifestQuantity_ul",
    "total_concentration": "gcManifestTotalConcentration_ng_per_ul",
    "total_dna": "gcManifestTotalDNA_ng",
    "visit_description": "gcManifestVisitDescription",
    "sample_source": "gcManifestSampleSource",
    "study": "gcManifestStudy",
    "tracking_number": "gcManifestTrackingNumber",
    "contact": "gcManifestContact",
    "email": "gcManifestEmail",
    "study_pi": "gcManifestStudyPI",
    "test_name": "gcManifestTestName",
    "failure_mode": "gcManifestFailureMode",
    "failure_mode_desc": "gcManifestFailureDescription"
}

# Raw AW2 manifest column names (snake_case) -> GenomicSetMember model
# attribute names (camelCase).
raw_aw2_to_genomic_set_member_fields = {
    "lims_id": "limsId",
    "chipwellbarcode": "chipwellbarcode",
    "call_rate": "callRate",
    "mean_coverage": "meanCoverage",
    "genome_coverage": "genomeCoverage",
    "aouhdr_coverage": "aouHdrCoverage",
    "contamination": "contamination",
    "sex_concordance": "sexConcordance",
    "sex_ploidy": "sexPloidy",
    "aligned_q30_bases": "alignedQ30Bases",
    "array_concordance": "arrayConcordance",
    "processing_status": "processingStatus",
    "notes": "notes",
    "pipeline_id": "pipelineId"
}
# Genomic data-file bookkeeping: for each logical file kind, the file-name
# extension(s) that identify it ('file_ext') and the model attribute pair
# ('model_attrs': [path attribute, received-flag attribute]) it is recorded
# under.
# NOTE(review): the 'gcvf' and 'gcvf_md5' entries duplicate 'gvcf'/'gvcfMd5'
# (identical extensions and attributes). They look like legacy misspelled
# keys kept for compatibility with existing callers — confirm before removing.
genomic_data_file_mappings = {
    'idatRed': {
        'file_ext': ['_red.idat'],
        'model_attrs': ['idatRedPath', 'idatRedReceived']
    },
    'idatGreen': {
        'file_ext': ['_grn.idat'],
        'model_attrs': ['idatGreenPath', 'idatGreenReceived']
    },
    'idatRedMd5': {
        'file_ext': ['_red.idat.md5sum'],
        'model_attrs': ['idatRedMd5Path', 'idatRedMd5Received']
    },
    'idatGreenMd5': {
        'file_ext': ['_grn.idat.md5sum'],
        'model_attrs': ['idatGreenMd5Path', 'idatGreenMd5Received']
    },
    'vcf': {
        'file_ext': ['vcf.gz'],
        'model_attrs': ['vcfPath', 'vcfReceived']
    },
    'vcfTbi': {
        'file_ext': ['vcf.gz.tbi'],
        'model_attrs': ['vcfTbiPath', 'vcfTbiReceived']
    },
    'vcfMd5': {
        'file_ext': ['vcf.gz.md5sum'],
        'model_attrs': ['vcfMd5Path', 'vcfMd5Received']
    },
    'hfVcf': {
        'file_ext': ['hard-filtered.vcf.gz'],
        'model_attrs': ['hfVcfPath', 'hfVcfReceived']
    },
    'hfVcfTbi': {
        'file_ext': ['hard-filtered.vcf.gz.tbi'],
        'model_attrs': ['hfVcfTbiPath', 'hfVcfTbiReceived']
    },
    'hfVcfMd5': {
        'file_ext': ['hard-filtered.vcf.gz.md5sum'],
        'model_attrs': ['hfVcfMd5Path', 'hfVcfMd5Received']
    },
    'rawVcf': {
        'file_ext': ['vcf.gz'],
        'model_attrs': ['rawVcfPath', 'rawVcfReceived']
    },
    'rawVcfTbi': {
        'file_ext': ['vcf.gz.tbi'],
        'model_attrs': ['rawVcfTbiPath', 'rawVcfTbiReceived']
    },
    'rawVcfMd5': {
        'file_ext': ['vcf.gz.md5sum'],
        'model_attrs': ['rawVcfMd5Path', 'rawVcfMd5Received']
    },
    'cram': {
        'file_ext': ['cram'],
        'model_attrs': ['cramPath', 'cramReceived']
    },
    'cramMd5': {
        'file_ext': ['cram.md5sum'],
        'model_attrs': ['cramMd5Path', 'cramMd5Received']
    },
    'crai': {
        'file_ext': ['cram.crai'],
        'model_attrs': ['craiPath', 'craiReceived']
    },
    'gvcf': {
        'file_ext': ['hard-filtered.gvcf.gz'],
        'model_attrs': ['gvcfPath', 'gvcfReceived']
    },
    'gvcfMd5': {
        'file_ext': ['hard-filtered.gvcf.gz.md5sum'],
        'model_attrs': ['gvcfMd5Path', 'gvcfMd5Received']
    },
    'gcvf': {
        'file_ext': ['hard-filtered.gvcf.gz'],
        'model_attrs': ['gvcfPath', 'gvcfReceived']
    },
    'gcvf_md5': {
        'file_ext': ['hard-filtered.gvcf.gz.md5sum'],
        'model_attrs': ['gvcfMd5Path', 'gvcfMd5Received']
    },
}
# WGS data-file expectations. For each file type: the model attributes that
# store its path and received flag, the file-type suffix, and whether the
# file is required ('required': True) or optional ('required': False).
wgs_file_types_attributes = (
    {'file_path_attribute': 'hfVcfPath',
     'file_received_attribute': 'hfVcfReceived',
     'file_type': 'hard-filtered.vcf.gz',
     'required': True},
    {'file_path_attribute': 'hfVcfTbiPath',
     'file_received_attribute': 'hfVcfTbiReceived',
     'file_type': 'hard-filtered.vcf.gz.tbi',
     'required': True},
    {'file_path_attribute': 'hfVcfMd5Path',
     'file_received_attribute': 'hfVcfMd5Received',
     'file_type': 'hard-filtered.vcf.gz.md5sum',
     'required': True},
    # The raw (unfiltered) VCF trio is optional.
    {'file_path_attribute': 'rawVcfPath',
     'file_received_attribute': 'rawVcfReceived',
     'file_type': 'vcf.gz',
     'required': False},
    {'file_path_attribute': 'rawVcfTbiPath',
     'file_received_attribute': 'rawVcfTbiReceived',
     'file_type': 'vcf.gz.tbi',
     'required': False},
    {'file_path_attribute': 'rawVcfMd5Path',
     'file_received_attribute': 'rawVcfMd5Received',
     'file_type': 'vcf.gz.md5sum',
     'required': False},
    {'file_path_attribute': 'cramPath',
     'file_received_attribute': 'cramReceived',
     'file_type': 'cram',
     'required': True},
    {'file_path_attribute': 'cramMd5Path',
     'file_received_attribute': 'cramMd5Received',
     'file_type': 'cram.md5sum',
     'required': True},
    {'file_path_attribute': 'craiPath',
     'file_received_attribute': 'craiReceived',
     'file_type': 'cram.crai',
     'required': True},
    {'file_path_attribute': 'gvcfPath',
     'file_received_attribute': 'gvcfReceived',
     'file_type': 'hard-filtered.gvcf.gz',
     'required': True},
    {'file_path_attribute': 'gvcfMd5Path',
     'file_received_attribute': 'gvcfMd5Received',
     'file_type': 'hard-filtered.gvcf.gz.md5sum',
     'required': True}
)
# Array (genotyping) data-file expectations, same shape as
# wgs_file_types_attributes; every array file type is required.
# NOTE(review): file_type values here are capitalized ('Red.idat') whereas
# genomic_data_file_mappings uses lower-case extensions ('_red.idat') —
# presumably matched case-insensitively or against different name parts;
# confirm at the call sites.
array_file_types_attributes = (
    {'file_path_attribute': 'idatRedPath',
     'file_received_attribute': 'idatRedReceived',
     'file_type': 'Red.idat',
     'required': True},
    {'file_path_attribute': 'idatGreenPath',
     'file_received_attribute': 'idatGreenReceived',
     'file_type': 'Grn.idat',
     'required': True},
    {'file_path_attribute': 'idatRedMd5Path',
     'file_received_attribute': 'idatRedMd5Received',
     'file_type': 'Red.idat.md5sum',
     'required': True},
    {'file_path_attribute': 'idatGreenMd5Path',
     'file_received_attribute': 'idatGreenMd5Received',
     'file_type': 'Grn.idat.md5sum',
     'required': True},
    {'file_path_attribute': 'vcfPath',
     'file_received_attribute': 'vcfReceived',
     'file_type': 'vcf.gz',
     'required': True},
    {'file_path_attribute': 'vcfTbiPath',
     'file_received_attribute': 'vcfTbiReceived',
     'file_type': 'vcf.gz.tbi',
     'required': True},
    {'file_path_attribute': 'vcfMd5Path',
     'file_received_attribute': 'vcfMd5Received',
     'file_type': 'vcf.gz.md5sum',
     'required': True}
)
# Genome-center id keyed by the distinguishing token of the array bucket name.
# NOTE(review): 'baylor' maps to 'jh' here but to 'bcm' for WGS below —
# appears intentional (different processing sites per genome type); confirm.
genome_centers_id_from_bucket_array = {
    'baylor': 'jh',
    'broad': 'bi',
    'northwest': 'uw',
    'data': 'rdr'
}

# Genome-center id keyed by the distinguishing token of the WGS bucket name.
genome_centers_id_from_bucket_wgs = {
    'baylor': 'bcm',
    'broad': 'bi',
    'northwest': 'uw'
}

# Informing-loop decision event name -> corresponding screen event name.
# GEM and PGx decisions land on screen 8; HDR decisions land on screen 10.
informing_loop_event_mappings = {
    'gem.informing_loop_decision.no': 'gem.informing_loop.screen8_no',
    'gem.informing_loop_decision.yes': 'gem.informing_loop.screen8_yes',
    'gem.informing_loop_decision.maybe_later': 'gem.informing_loop.screen8_maybe_later',
    'pgx.informing_loop_decision.no': 'pgx.informing_loop.screen8_no',
    'pgx.informing_loop_decision.yes': 'pgx.informing_loop.screen8_yes',
    'pgx.informing_loop_decision.maybe_later': 'pgx.informing_loop.screen8_maybe_later',
    'hdr.informing_loop_decision.no': 'hdr.informing_loop.screen10_no',
    'hdr.informing_loop_decision.yes': 'hdr.informing_loop.screen10_yes',
    'hdr.informing_loop_decision.maybe_later': 'hdr.informing_loop.screen10_maybe_later'
}

# CVL result reconciliation: module key -> versioned module name.
cvl_result_reconciliation_modules = {
    "hdr": "hdr_v1",
    "pgx": "pgx_v1"
}

# Message-broker "report ready" event name -> GenomicReportState to record.
message_broker_report_ready_event_state_mappings = {
    "pgx.result_ready": GenomicReportState.PGX_RPT_READY,
    "hdr.result_ready.informative": GenomicReportState.HDR_RPT_POSITIVE,
    "hdr.result_ready.uninformative": GenomicReportState.HDR_RPT_UNINFORMATIVE
}

# Message-broker "report viewed" event names.
# NOTE(review): despite the "mappings" name this is a plain list; renaming
# would break importers, so it is kept as-is.
message_broker_report_viewed_event_state_mappings = [
    "hdr.opened_at",
    "pgx.opened_at"
]
# Per-genome-center GCS sub-directory prefix for each WGS data-file type.
# NOTE(review): 'bi' (Broad) uses lower-case path segments and stores the
# gvcf md5 under ss_vcf_research without a gvcf.gz entry of its own; the
# other centers use capitalized segments. This presumably mirrors each
# center's actual bucket layout — confirm against the centers' specs.
genome_center_datafile_prefix_map = {
    'bi': {
        'cram': 'wgs_sample_raw_data/crams_crais',
        'cram.crai': 'wgs_sample_raw_data/crams_crais',
        'cram.md5sum': 'wgs_sample_raw_data/crams_crais',
        'hard-filtered.vcf.gz': 'wgs_sample_raw_data/ss_vcf_clinical',
        'hard-filtered.vcf.gz.md5sum': 'wgs_sample_raw_data/ss_vcf_clinical',
        'hard-filtered.vcf.gz.tbi': 'wgs_sample_raw_data/ss_vcf_clinical',
        'hard-filtered.gvcf.gz': 'wgs_sample_raw_data/ss_vcf_research',
        'hard-filtered.gvcf.gz.md5sum': 'wgs_sample_raw_data/ss_vcf_research',
    },
    'uw': {
        'cram': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'cram.crai': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'cram.md5sum': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'hard-filtered.vcf.gz': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.vcf.gz.md5sum': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.vcf.gz.tbi': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.gvcf.gz': 'Wgs_sample_raw_data/SS_VCF_research',
        'hard-filtered.gvcf.gz.md5sum': 'Wgs_sample_raw_data/SS_VCF_research'
    },
    'bcm': {
        'cram': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'cram.crai': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'cram.md5sum': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'hard-filtered.vcf.gz': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.vcf.gz.md5sum': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.vcf.gz.tbi': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.gvcf.gz': 'Wgs_sample_raw_data/SS_VCF_research',
        'hard-filtered.gvcf.gz.md5sum': 'Wgs_sample_raw_data/SS_VCF_research'
    },
    'rdr': {
        'cram': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'cram.crai': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'cram.md5sum': 'Wgs_sample_raw_data/CRAMs_CRAIs',
        'hard-filtered.vcf.gz': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.vcf.gz.md5sum': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.vcf.gz.tbi': 'Wgs_sample_raw_data/SS_VCF_clinical',
        'hard-filtered.gvcf.gz': 'Wgs_sample_raw_data/SS_VCF_research',
        'hard-filtered.gvcf.gz.md5sum': 'Wgs_sample_raw_data/SS_VCF_research'
    }
}
| bsd-3-clause | 609c71a120b0bab3f86858d2579ca458 | 41.543624 | 88 | 0.530683 | 3.203133 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/medication.py | 1 | 9113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Medication) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class Medication(domainresource.DomainResource):
    """ Definition of a Medication.

    This resource is primarily used for the identification and definition of a
    medication. It covers the ingredients and the packaging for a medication.

    Auto-generated fhirclient model (see file header); avoid hand-editing
    logic. The `codeableconcept`/`fhirreference` modules are imported at the
    bottom of this file to avoid circular imports.
    """

    resource_name = "Medication"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ Codes that identify this medication.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.isBrand = None
        """ True if a brand.
        Type `bool`. """

        self.manufacturer = None
        """ Manufacturer of the item.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """

        self.package = None
        """ Details about packaged medications.
        Type `MedicationPackage` (represented as `dict` in JSON). """

        self.product = None
        """ Administrable medication details.
        Type `MedicationProduct` (represented as `dict` in JSON). """

        super(Medication, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple is (attribute name, JSON property name, type, is-list,
        # "one of many" group, required) — the fhirclient base-class convention.
        js = super(Medication, self).elementProperties()
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("isBrand", "isBrand", bool, False, None, False),
            ("manufacturer", "manufacturer", fhirreference.FHIRReference, False, None, False),
            ("package", "package", MedicationPackage, False, None, False),
            ("product", "product", MedicationProduct, False, None, False),
        ])
        return js
from . import backboneelement
class MedicationPackage(backboneelement.BackboneElement):
    """ Details about packaged medications.

    Information that only applies to packages (not products).
    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_name = "MedicationPackage"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.container = None
        """ E.g. box, vial, blister-pack.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.content = None
        """ What is in the package.
        List of `MedicationPackageContent` items (represented as `dict` in JSON). """

        super(MedicationPackage, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        js = super(MedicationPackage, self).elementProperties()
        js.extend([
            ("container", "container", codeableconcept.CodeableConcept, False, None, False),
            ("content", "content", MedicationPackageContent, True, None, False),
        ])
        return js
class MedicationPackageContent(backboneelement.BackboneElement):
    """ What is in the package.

    A set of components that go to make up the described item.
    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_name = "MedicationPackageContent"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.amount = None
        """ Quantity present in the package.
        Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in JSON). """

        self.item = None
        """ A product in the package.
        Type `FHIRReference` referencing `Medication` (represented as `dict` in JSON). """

        super(MedicationPackageContent, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        # `item` is the only required element here (final True).
        js = super(MedicationPackageContent, self).elementProperties()
        js.extend([
            ("amount", "amount", quantity.Quantity, False, None, False),
            ("item", "item", fhirreference.FHIRReference, False, None, True),
        ])
        return js
class MedicationProduct(backboneelement.BackboneElement):
    """ Administrable medication details.

    Information that only applies to products (not packages).
    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_name = "MedicationProduct"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.batch = None
        """ None.
        List of `MedicationProductBatch` items (represented as `dict` in JSON). """

        self.form = None
        """ powder | tablets | carton +.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.ingredient = None
        """ Active or inactive ingredient.
        List of `MedicationProductIngredient` items (represented as `dict` in JSON). """

        super(MedicationProduct, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        js = super(MedicationProduct, self).elementProperties()
        js.extend([
            ("batch", "batch", MedicationProductBatch, True, None, False),
            ("form", "form", codeableconcept.CodeableConcept, False, None, False),
            ("ingredient", "ingredient", MedicationProductIngredient, True, None, False),
        ])
        return js
class MedicationProductBatch(backboneelement.BackboneElement):
    """ None.

    Information about a group of medication produced or packaged from one
    production run.
    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_name = "MedicationProductBatch"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.expirationDate = None
        """ None.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.lotNumber = None
        """ None.
        Type `str`. """

        super(MedicationProductBatch, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        js = super(MedicationProductBatch, self).elementProperties()
        js.extend([
            ("expirationDate", "expirationDate", fhirdate.FHIRDate, False, None, False),
            ("lotNumber", "lotNumber", str, False, None, False),
        ])
        return js
class MedicationProductIngredient(backboneelement.BackboneElement):
    """ Active or inactive ingredient.

    Identifies a particular constituent of interest in the product.
    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_name = "MedicationProductIngredient"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.amount = None
        """ Quantity of ingredient present.
        Type `Ratio` (represented as `dict` in JSON). """

        self.item = None
        """ The product contained.
        Type `FHIRReference` referencing `Substance, Medication` (represented as `dict` in JSON). """

        super(MedicationProductIngredient, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        # `item` is the only required element here (final True).
        js = super(MedicationProductIngredient, self).elementProperties()
        js.extend([
            ("amount", "amount", ratio.Ratio, False, None, False),
            ("item", "item", fhirreference.FHIRReference, False, None, True),
        ])
        return js
from . import codeableconcept
from . import fhirdate
from . import fhirreference
from . import quantity
from . import ratio
| bsd-3-clause | 033aff0242d0219b755bc1ba85d7d301 | 35.745968 | 101 | 0.6257 | 4.471541 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/substancesourcematerial.py | 1 | 21182 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/SubstanceSourceMaterial) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class SubstanceSourceMaterial(domainresource.DomainResource):
    """ Source material shall capture information on the taxonomic and anatomical
    origins as well as the fraction of a material that can result in or can be
    modified to form a substance. This set of data elements shall be used to
    define polymer substances isolated from biological matrices. Taxonomic and
    anatomical origins shall be described using a controlled vocabulary as
    required. This information is captured for naturally derived polymers ( .
    starch) and structurally diverse substances. For Organisms belonging to the
    Kingdom Plantae the Substance level defines the fresh material of a single
    species or infraspecies, the Herbal Drug and the Herbal preparation. For
    Herbal preparations, the fraction information will be captured at the
    Substance information level and additional information for herbal extracts
    will be captured at the Specified Substance Group 1 information level. See
    for further explanation the Substance Class: Structurally Diverse and the
    herbal annex.

    Auto-generated fhirclient (FHIR R4) model; avoid hand-editing logic. The
    `codeableconcept`/`identifier` modules are imported at the bottom of this
    file to avoid circular imports.
    """

    resource_type = "SubstanceSourceMaterial"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.countryOfOrigin = None
        """ The country where the plant material is harvested or the countries
        where the plasma is sourced from as laid down in accordance with
        the Plasma Master File. For “Plasma-derived substances” the
        attribute country of origin provides information about the
        countries used for the manufacturing of the Cryopoor plama or
        Crioprecipitate.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.developmentStage = None
        """ Stage of life for animals, plants, insects and microorganisms. This
        information shall be provided only when the substance is
        significantly different in these stages (e.g. foetal bovine serum).
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.fractionDescription = None
        """ Many complex materials are fractions of parts of plants, animals,
        or minerals. Fraction elements are often necessary to define both
        Substances and Specified Group 1 Substances. For substances derived
        from Plants, fraction information will be captured at the Substance
        information level ( . Oils, Juices and Exudates). Additional
        information for Extracts, such as extraction solvent composition,
        will be captured at the Specified Substance Group 1 information
        level. For plasma-derived products fraction information will be
        captured at the Substance and the Specified Substance Group 1
        levels.
        List of `SubstanceSourceMaterialFractionDescription` items (represented as `dict` in JSON). """

        self.geographicalLocation = None
        """ The place/region where the plant is harvested or the places/regions
        where the animal source material has its habitat.
        List of `str` items. """

        self.organism = None
        """ This subclause describes the organism which the substance is
        derived from. For vaccines, the parent organism shall be specified
        based on these subclause elements. As an example, full taxonomy
        will be described for the Substance Name: ., Leaf.
        Type `SubstanceSourceMaterialOrganism` (represented as `dict` in JSON). """

        self.organismId = None
        """ The unique identifier associated with the source material parent
        organism shall be specified.
        Type `Identifier` (represented as `dict` in JSON). """

        self.organismName = None
        """ The organism accepted Scientific name shall be provided based on
        the organism taxonomy.
        Type `str`. """

        self.parentSubstanceId = None
        """ The parent of the herbal drug Ginkgo biloba, Leaf is the substance
        ID of the substance (fresh) of Ginkgo biloba L. or Ginkgo biloba L.
        (Whole plant).
        List of `Identifier` items (represented as `dict` in JSON). """

        self.parentSubstanceName = None
        """ The parent substance of the Herbal Drug, or Herbal preparation.
        List of `str` items. """

        self.partDescription = None
        """ To do.
        List of `SubstanceSourceMaterialPartDescription` items (represented as `dict` in JSON). """

        self.sourceMaterialClass = None
        """ General high level classification of the source material specific
        to the origin of the material.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.sourceMaterialState = None
        """ The state of the source material when extracted.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.sourceMaterialType = None
        """ The type of the source material shall be specified based on a
        controlled vocabulary. For vaccines, this subclause refers to the
        class of infectious agent.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(SubstanceSourceMaterial, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple is (attribute name, JSON property name, type, is-list,
        # "one of many" group, required) — the fhirclient base-class convention.
        js = super(SubstanceSourceMaterial, self).elementProperties()
        js.extend([
            ("countryOfOrigin", "countryOfOrigin", codeableconcept.CodeableConcept, True, None, False),
            ("developmentStage", "developmentStage", codeableconcept.CodeableConcept, False, None, False),
            ("fractionDescription", "fractionDescription", SubstanceSourceMaterialFractionDescription, True, None, False),
            ("geographicalLocation", "geographicalLocation", str, True, None, False),
            ("organism", "organism", SubstanceSourceMaterialOrganism, False, None, False),
            ("organismId", "organismId", identifier.Identifier, False, None, False),
            ("organismName", "organismName", str, False, None, False),
            ("parentSubstanceId", "parentSubstanceId", identifier.Identifier, True, None, False),
            ("parentSubstanceName", "parentSubstanceName", str, True, None, False),
            ("partDescription", "partDescription", SubstanceSourceMaterialPartDescription, True, None, False),
            ("sourceMaterialClass", "sourceMaterialClass", codeableconcept.CodeableConcept, False, None, False),
            ("sourceMaterialState", "sourceMaterialState", codeableconcept.CodeableConcept, False, None, False),
            ("sourceMaterialType", "sourceMaterialType", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
from . import backboneelement
class SubstanceSourceMaterialFractionDescription(backboneelement.BackboneElement):
    """ Many complex materials are fractions of parts of plants, animals, or
    minerals. Fraction elements are often necessary to define both Substances
    and Specified Group 1 Substances. For substances derived from Plants,
    fraction information will be captured at the Substance information level (
    . Oils, Juices and Exudates). Additional information for Extracts, such as
    extraction solvent composition, will be captured at the Specified Substance
    Group 1 information level. For plasma-derived products fraction information
    will be captured at the Substance and the Specified Substance Group 1
    levels.

    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_type = "SubstanceSourceMaterialFractionDescription"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.fraction = None
        """ This element is capturing information about the fraction of a plant
        part, or human plasma for fractionation.
        Type `str`. """

        self.materialType = None
        """ The specific type of the material constituting the component. For
        Herbal preparations the particulars of the extracts (liquid/dry) is
        described in Specified Substance Group 1.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(SubstanceSourceMaterialFractionDescription, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        js = super(SubstanceSourceMaterialFractionDescription, self).elementProperties()
        js.extend([
            ("fraction", "fraction", str, False, None, False),
            ("materialType", "materialType", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
class SubstanceSourceMaterialOrganism(backboneelement.BackboneElement):
    """ This subclause describes the organism which the substance is derived from.
    For vaccines, the parent organism shall be specified based on these
    subclause elements. As an example, full taxonomy will be described for the
    Substance Name: ., Leaf.

    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_type = "SubstanceSourceMaterialOrganism"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.author = None
        """ 4.9.13.6.1 Author type (Conditional).
        List of `SubstanceSourceMaterialOrganismAuthor` items (represented as `dict` in JSON). """

        self.family = None
        """ The family of an organism shall be specified.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.genus = None
        """ The genus of an organism shall be specified; refers to the Latin
        epithet of the genus element of the plant/animal scientific name;
        it is present in names for genera, species and infraspecies.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.hybrid = None
        """ 4.9.13.8.1 Hybrid species maternal organism ID (Optional).
        Type `SubstanceSourceMaterialOrganismHybrid` (represented as `dict` in JSON). """

        self.intraspecificDescription = None
        """ The intraspecific description of an organism shall be specified
        based on a controlled vocabulary. For Influenza Vaccine, the
        intraspecific description shall contain the syntax of the antigen
        in line with the WHO convention.
        Type `str`. """

        self.intraspecificType = None
        """ The Intraspecific type of an organism shall be specified.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.organismGeneral = None
        """ 4.9.13.7.1 Kingdom (Conditional).
        Type `SubstanceSourceMaterialOrganismOrganismGeneral` (represented as `dict` in JSON). """

        self.species = None
        """ The species of an organism shall be specified; refers to the Latin
        epithet of the species of the plant/animal; it is present in names
        for species and infraspecies.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(SubstanceSourceMaterialOrganism, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        js = super(SubstanceSourceMaterialOrganism, self).elementProperties()
        js.extend([
            ("author", "author", SubstanceSourceMaterialOrganismAuthor, True, None, False),
            ("family", "family", codeableconcept.CodeableConcept, False, None, False),
            ("genus", "genus", codeableconcept.CodeableConcept, False, None, False),
            ("hybrid", "hybrid", SubstanceSourceMaterialOrganismHybrid, False, None, False),
            ("intraspecificDescription", "intraspecificDescription", str, False, None, False),
            ("intraspecificType", "intraspecificType", codeableconcept.CodeableConcept, False, None, False),
            ("organismGeneral", "organismGeneral", SubstanceSourceMaterialOrganismOrganismGeneral, False, None, False),
            ("species", "species", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
class SubstanceSourceMaterialOrganismAuthor(backboneelement.BackboneElement):
    """ 4.9.13.6.1 Author type (Conditional).

    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_type = "SubstanceSourceMaterialOrganismAuthor"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.authorDescription = None
        """ The author of an organism species shall be specified. The author
        year of an organism shall also be specified when applicable; refers
        to the year in which the first author(s) published the
        infraspecific plant/animal name (of any rank).
        Type `str`. """

        self.authorType = None
        """ The type of author of an organism species shall be specified. The
        parenthetical author of an organism species refers to the first
        author who published the plant/animal name (of any rank). The
        primary author of an organism species refers to the first
        author(s), who validly published the plant/animal name.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(SubstanceSourceMaterialOrganismAuthor, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        js = super(SubstanceSourceMaterialOrganismAuthor, self).elementProperties()
        js.extend([
            ("authorDescription", "authorDescription", str, False, None, False),
            ("authorType", "authorType", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
class SubstanceSourceMaterialOrganismHybrid(backboneelement.BackboneElement):
    """ 4.9.13.8.1 Hybrid species maternal organism ID (Optional).

    Auto-generated fhirclient model; avoid hand-editing logic.
    """

    resource_type = "SubstanceSourceMaterialOrganismHybrid"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.hybridType = None
        """ The hybrid type of an organism shall be specified.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.maternalOrganismId = None
        """ The identifier of the maternal species constituting the hybrid
        organism shall be specified based on a controlled vocabulary. For
        plants, the parents aren’t always known, and it is unlikely that it
        will be known which is maternal and which is paternal.
        Type `str`. """

        self.maternalOrganismName = None
        """ The name of the maternal species constituting the hybrid organism
        shall be specified. For plants, the parents aren’t always known,
        and it is unlikely that it will be known which is maternal and
        which is paternal.
        Type `str`. """

        self.paternalOrganismId = None
        """ The identifier of the paternal species constituting the hybrid
        organism shall be specified based on a controlled vocabulary.
        Type `str`. """

        self.paternalOrganismName = None
        """ The name of the paternal species constituting the hybrid organism
        shall be specified.
        Type `str`. """

        super(SubstanceSourceMaterialOrganismHybrid, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuples: (name, json name, type, is-list, "of many" group, required).
        js = super(SubstanceSourceMaterialOrganismHybrid, self).elementProperties()
        js.extend([
            ("hybridType", "hybridType", codeableconcept.CodeableConcept, False, None, False),
            ("maternalOrganismId", "maternalOrganismId", str, False, None, False),
            ("maternalOrganismName", "maternalOrganismName", str, False, None, False),
            ("paternalOrganismId", "paternalOrganismId", str, False, None, False),
            ("paternalOrganismName", "paternalOrganismName", str, False, None, False),
        ])
        return js
class SubstanceSourceMaterialOrganismOrganismGeneral(backboneelement.BackboneElement):
    """ 4.9.13.7.1 Kingdom (Conditional).
    """
    resource_type = "SubstanceSourceMaterialOrganismOrganismGeneral"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # The class of an organism shall be specified.  Named `class_fhir`
        # because `class` is a Python keyword; serialized as "class" in JSON.
        # `CodeableConcept` (represented as `dict` in JSON).
        self.class_fhir = None
        # The kingdom of an organism shall be specified. (`CodeableConcept`)
        self.kingdom = None
        # The order of an organism shall be specified. (`CodeableConcept`)
        self.order = None
        # The phylum of an organism shall be specified. (`CodeableConcept`)
        self.phylum = None
        super(SubstanceSourceMaterialOrganismOrganismGeneral, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property definitions, including this element's own fields."""
        properties = super(SubstanceSourceMaterialOrganismOrganismGeneral, self).elementProperties()
        properties += [
            ("class_fhir", "class", codeableconcept.CodeableConcept, False, None, False),
            ("kingdom", "kingdom", codeableconcept.CodeableConcept, False, None, False),
            ("order", "order", codeableconcept.CodeableConcept, False, None, False),
            ("phylum", "phylum", codeableconcept.CodeableConcept, False, None, False),
        ]
        return properties
class SubstanceSourceMaterialPartDescription(backboneelement.BackboneElement):
    """ To do.
    """
    resource_type = "SubstanceSourceMaterialPartDescription"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Entity of anatomical origin of source material within an organism.
        # `CodeableConcept` (represented as `dict` in JSON).
        self.part = None
        # The detailed anatomic location when the part can be extracted from
        # different anatomical locations of the organism; multiple alternative
        # locations may apply. (`CodeableConcept`)
        self.partLocation = None
        super(SubstanceSourceMaterialPartDescription, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property definitions, including this element's own fields."""
        properties = super(SubstanceSourceMaterialPartDescription, self).elementProperties()
        properties.append(("part", "part", codeableconcept.CodeableConcept, False, None, False))
        properties.append(("partLocation", "partLocation", codeableconcept.CodeableConcept, False, None, False))
        return properties
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
| bsd-3-clause | b7a2c2cede8d82cb1e57b288cd8896ce | 47.232346 | 122 | 0.670681 | 4.370279 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/parameterdefinition.py | 1 | 2568 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ParameterDefinition) on 2017-03-22.
# 2017, SMART Health IT.
from . import element
class ParameterDefinition(element.Element):
    """ Definition of a parameter to a module.
    The parameters to the module. This collection specifies both the input and
    output parameters. Input parameters are provided by the caller as part of
    the $evaluate operation. Output parameters are included in the
    GuidanceResponse.
    """
    resource_type = "ParameterDefinition"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # A brief description of the parameter. (`str`)
        self.documentation = None
        # Maximum cardinality (a number or "*"). (`str`)
        self.max = None
        # Minimum cardinality. (`int`)
        self.min = None
        # Name used to access the parameter value. (`str`)
        self.name = None
        # What profile the value is expected to be; `FHIRReference` to a
        # StructureDefinition (represented as `dict` in JSON).
        self.profile = None
        # What type of value. (`str`)
        self.type = None
        # "in" | "out". (`str`)
        self.use = None
        super(ParameterDefinition, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property definitions; `type` and `use` are mandatory."""
        properties = super(ParameterDefinition, self).elementProperties()
        properties += [
            ("documentation", "documentation", str, False, None, False),
            ("max", "max", str, False, None, False),
            ("min", "min", int, False, None, False),
            ("name", "name", str, False, None, False),
            ("profile", "profile", fhirreference.FHIRReference, False, None, False),
            ("type", "type", str, False, None, True),
            ("use", "use", str, False, None, True),
        ]
        return properties
import sys
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
| bsd-3-clause | 73bb2a87fb377b4abd1b0b3472d18109 | 32.350649 | 111 | 0.58528 | 4.39726 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/1dff4309d707_hash_and_duplicate_flag_on_.py | 1 | 2154 | """hash and duplicate flag on questionnaire response
Revision ID: 1dff4309d707
Revises: a6c3846e5eae
Create Date: 2021-04-01 12:55:33.967305
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '1dff4309d707'  # this migration's id (matches the module docstring)
down_revision = 'a6c3846e5eae'  # the migration this one revises
branch_labels = None  # not the head of a named branch
depends_on = None  # no cross-branch dependencies
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, e.g. ``upgrade_rdr``."""
    handler_name = "upgrade_%s" % engine_name
    globals()[handler_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. ``downgrade_rdr``."""
    handler_name = "downgrade_%s" % engine_name
    globals()[handler_name]()
def upgrade_rdr():
    """Add answer_hash and is_duplicate columns to questionnaire_response."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 32-character hash of the response's answers (length fits a hex digest).
    op.add_column('questionnaire_response', sa.Column('answer_hash', sa.String(length=32), nullable=True))
    # server_default 'false' so existing rows get a value without a backfill.
    op.add_column('questionnaire_response', sa.Column('is_duplicate', sa.Boolean(), server_default=sa.text('false'), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Reverse upgrade_rdr: drop the two questionnaire_response columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('questionnaire_response', 'is_duplicate')
    op.drop_column('questionnaire_response', 'answer_hash')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database schema changes are needed in this revision."""
def downgrade_metrics():
    """No metrics-database schema changes to reverse in this revision."""
| bsd-3-clause | 56efd632efa7106de3e23a70e8d3be13 | 33.741935 | 132 | 0.750232 | 3.602007 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/query.py | 1 | 3882 | """A query to run against a DAO (abstracted from the persistent level)."""
from protorpc import messages
from sqlalchemy import func, not_, or_
class Operator(messages.Enum):
    """Comparison operators a field filter can apply to a column."""
    EQUALS = 0  # Case insensitive comparison for strings, exact comparison otherwise
    LESS_THAN = 1
    GREATER_THAN = 2
    LESS_THAN_OR_EQUALS = 3
    GREATER_THAN_OR_EQUALS = 4
    NOT_EQUALS = 5
    EQUALS_OR_NONE = 6  # matches when the column equals the value OR is NULL
# Note: we don't support contains or exact string comparison at this stage
class PropertyType(messages.Enum):
    """The value type of a filterable property."""
    STRING = 0
    DATE = 1
    DATETIME = 2
    ENUM = 3
    INTEGER = 4
    CODE = 5
class FieldJsonContainsFilter(object):
    """Filter a JSON column using MySQL's JSON_CONTAINS function."""
    def __init__(self, field_name, operator, value):
        self.field_name = field_name
        self.operator = operator
        self.value = value
    def add_to_sqlalchemy_query(self, query, field):
        """Return ``query`` with a JSON_CONTAINS predicate applied to ``field``.

        A None value becomes an IS NULL test regardless of the operator.
        """
        if self.value is None:
            return query.filter(field.is_(None))
        # NOT_EQUALS asserts the value is absent; any other operator asserts presence.
        expected = 0 if self.operator == Operator.NOT_EQUALS else 1
        return query.filter(func.json_contains(field, self.value, "$") == expected)
class FieldLikeFilter(object):
    """Filter a string column with SQL ``LIKE '%value%'`` substring matching."""
    def __init__(self, field_name, operator, value):
        self.field_name = field_name
        self.operator = operator
        self.value = value
    def add_to_sqlalchemy_query(self, query, field):
        """Return ``query`` with a LIKE (or NOT LIKE) predicate on ``field``.

        A None value becomes an IS NULL test regardless of the operator.
        """
        if self.value is None:
            return query.filter(field.is_(None))
        pattern = field.like("%{0}%".format(self.value))
        if self.operator == Operator.NOT_EQUALS:
            return query.filter(not_(pattern))
        return query.filter(pattern)
class FieldFilter(object):
    """Filter a column with a scalar comparison from the Operator enum."""
    def __init__(self, field_name, operator, value):
        self.field_name = field_name
        self.operator = operator
        self.value = value
    def add_to_sqlalchemy_query(self, query, field):
        """Return ``query`` with a predicate for ``field`` applied.

        A None value always becomes an IS NULL test, regardless of operator.

        :raises ValueError: if the operator is not a supported comparison.
        """
        if self.value is None:
            return query.filter(field.is_(None))
        # Build only the criterion we need.  The previous implementation built a
        # filtered query eagerly for every one of the seven operators on each
        # call and then discarded six of them.
        criterion_builders = {
            Operator.EQUALS: lambda: field == self.value,
            Operator.LESS_THAN: lambda: field < self.value,
            Operator.GREATER_THAN: lambda: field > self.value,
            Operator.LESS_THAN_OR_EQUALS: lambda: field <= self.value,
            Operator.GREATER_THAN_OR_EQUALS: lambda: field >= self.value,
            Operator.NOT_EQUALS: lambda: field != self.value,
            # Explicit is_(None) instead of "== None" (PEP 8; same IS NULL SQL).
            Operator.EQUALS_OR_NONE: lambda: or_(field == self.value, field.is_(None)),
        }
        builder = criterion_builders.get(self.operator)
        if not builder:
            raise ValueError("Invalid operator: %r." % self.operator)
        return query.filter(builder())
class OrderBy(object):
    """A sort directive: which field to order results by and in which direction."""
    def __init__(self, field_name, ascending):
        self.field_name, self.ascending = field_name, ascending
class Query(object):
    """A DAO-agnostic description of a query: filters, ordering, and paging."""
    def __init__(
        self,
        field_filters,
        order_by,
        max_results,
        pagination_token,
        a_id=None,
        always_return_token=False,
        include_total=False,
        offset=False,
        options=None,
        invalid_filters=None
    ):
        # Filtering and ordering.
        self.field_filters = field_filters
        self.order_by = order_by
        self.invalid_filters = invalid_filters
        # Paging controls.
        self.max_results = max_results
        self.pagination_token = pagination_token
        self.offset = offset
        self.always_return_token = always_return_token
        self.include_total = include_total
        # Miscellaneous.
        self.ancestor_id = a_id
        self.options = options
class Results(object):
    """One page of query results plus its paging metadata."""
    def __init__(self, items, pagination_token=None, more_available=False, total=None):
        # The rows for this page.
        self.items = items
        # Whether more rows exist beyond this page, and the token to fetch them.
        self.more_available = more_available
        self.pagination_token = pagination_token
        # Overall row count, when the caller requested it.
        self.total = total
| bsd-3-clause | b062545c825cd43b31dde783a14db794 | 30.560976 | 91 | 0.615147 | 3.885886 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/concepts.py | 1 | 4142 | """A single place for FHIR concepts."""
from collections import namedtuple
from rdr_service.census_regions import census_regions
# A coded concept: a terminology system URI plus a code within that system.
Concept = namedtuple("Concept", ["system", "code"])
SYSTEM_CONSENT_FORM = "http://terminology.pmi-ops.org/CodeSystem/consent-form"
ENROLLMENT_CONSENT_FORM = Concept(SYSTEM_CONSENT_FORM, "enrollment")
ELECTRONIC_HEALTH_RECORDS_CONSENT_FORM = Concept(SYSTEM_CONSENT_FORM, "electronic-health-records")
SYSTEM_PPI_MODULE = "http://terminology.pmi-ops.org/CodeSystem/ppi-module"
OVERALL_HEALTH_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "overall-health")
PERSONAL_HABITS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "personal-habits")
SOCIODEMOGRAPHICS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "sociodemographics")
HEALTHCARE_ACCESS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "healthcare-access")
MEDICAL_HISTORY_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "medical-history")
MEDICATIONS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "medications")
FAMILY_HEALTH_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "family-health")
SYSTEM_PHYSICAL_MEASUREMENTS = "http://terminology.pmi-ops.org/CodeSystem/document-type"
PHYSICAL_MEASUREMENTS_CONCEPT_CODE_PREFIX = "intake-exam-v"
SYSTEM_LOINC = "http://loinc.org"
SYSTEM_FHIR_NULL = "http://hl7.org/fhir/v3/NullFlavor"
SYSTEM_PMI_BASE = "http://terminology.pmi-ops.org/CodeSystem/"
SYSTEM_UNIT_OF_MEASURE = "http://unitsofmeasure.org"
ASKED_BUT_NO_ANSWER = Concept(SYSTEM_FHIR_NULL, "ASKU")
# NOTE(review): identical to ASKED_BUT_NO_ANSWER (both NullFlavor "ASKU") —
# confirm "prefer not to say" is intentionally mapped to the same code.
PREFER_NOT_TO_SAY = Concept(SYSTEM_FHIR_NULL, "ASKU")
# Used in the questionnaire response.
ETHNICITY = Concept(SYSTEM_LOINC, "69490-1")
HISPANIC = Concept("http://hl7.org/fhir/v3/Ethnicity", "2135-2")
NON_HISPANIC = Concept("http://hl7.org/fhir/v3/Ethnicity", "2186-5")
RACE = Concept(SYSTEM_LOINC, "72826-1")
SYSTEM_RACE = "http://hl7.org/fhir/v3/Race"
AMERICAN_INDIAN_OR_ALASKA_NATIVE = Concept(SYSTEM_RACE, "1002-5")
BLACK_OR_AFRICAN_AMERICAN = Concept(SYSTEM_RACE, "2054-5")
ASIAN = Concept(SYSTEM_RACE, "2028-9")
NATIVE_HAWAIIAN_OR_OTHER_PACIFIC_ISLANDER = Concept(SYSTEM_RACE, "2076-8")
WHITE = Concept(SYSTEM_RACE, "2106-3")
OTHER_RACE = Concept(SYSTEM_RACE, "2131-1")
GENDER_IDENTITY = Concept(SYSTEM_LOINC, "76691-5")
SYSTEM_GENDER_IDENTITY = SYSTEM_PMI_BASE + "gender-identity"
FEMALE = Concept(SYSTEM_GENDER_IDENTITY, "female")
FEMALE_TO_MALE_TRANSGENDER = Concept(SYSTEM_GENDER_IDENTITY, "female-to-male-transgender")
MALE = Concept(SYSTEM_GENDER_IDENTITY, "male")
MALE_TO_FEMALE_TRANSGENDER = Concept(SYSTEM_GENDER_IDENTITY, "male-to-female-transgender")
INTERSEX = Concept(SYSTEM_GENDER_IDENTITY, "intersex")
OTHER_GENDER = Concept(SYSTEM_GENDER_IDENTITY, "other")
SYSTEM_PPI_QUESTION = SYSTEM_PMI_BASE + "ppi-question"
# NOTE(review): uses the bare SYSTEM_PMI_BASE as its system, unlike the tier
# value concepts below which use SYSTEM_MEMBERSHIP_TIER — confirm intentional.
MEMBERSHIP_TIER = Concept(SYSTEM_PMI_BASE, "membership-tier")
SYSTEM_MEMBERSHIP_TIER = SYSTEM_PMI_BASE + "membership-tier"
REGISTERED = Concept(SYSTEM_MEMBERSHIP_TIER, "registered")
VOLUNTEER = Concept(SYSTEM_MEMBERSHIP_TIER, "volunteer")
FULL_PARTICIPANT = Concept(SYSTEM_MEMBERSHIP_TIER, "full-participant")
ENROLLEE = Concept(SYSTEM_MEMBERSHIP_TIER, "enrollee")
DATE_OF_BIRTH = Concept(SYSTEM_PPI_QUESTION, "date-of-birth")
FIRST_NAME = Concept(SYSTEM_PPI_QUESTION, "first-name")
MIDDLE_NAME = Concept(SYSTEM_PPI_QUESTION, "middle-name")
LAST_NAME = Concept(SYSTEM_PPI_QUESTION, "last-name")
STATE_OF_RESIDENCE = Concept(SYSTEM_LOINC, "46499-0")
SYSTEM_STATE = SYSTEM_PMI_BASE + "us-state"
# One concept per state abbreviation found in the census_regions mapping.
STATE_LIST = [Concept(SYSTEM_STATE, s) for s in list(census_regions.keys())]
STATES_BY_ABBREV = {c.code: c for c in STATE_LIST}
# Used in physical measurements.
SYSTOLIC_BP = Concept(SYSTEM_LOINC, "8480-6")
DIASTOLIC_BP = Concept(SYSTEM_LOINC, "8462-4")
HEART_RATE = Concept(SYSTEM_LOINC, "8867-4")
WEIGHT = Concept(SYSTEM_LOINC, "29463-7")
BMI = Concept(SYSTEM_LOINC, "39156-5")
HIP_CIRCUMFERENCE = Concept(SYSTEM_LOINC, "62409-8")
WAIST_CIRCUMFERENCE = Concept(SYSTEM_LOINC, "56086-2")
# UNITS
UNIT_MM_HG = Concept(SYSTEM_UNIT_OF_MEASURE, "mm[Hg]")
UNIT_KG = Concept(SYSTEM_UNIT_OF_MEASURE, "kg")
UNIT_CM = Concept(SYSTEM_UNIT_OF_MEASURE, "cm")
UNIT_PER_MIN = Concept(SYSTEM_UNIT_OF_MEASURE, "/min")
UNIT_KG_M2 = Concept(SYSTEM_UNIT_OF_MEASURE, "kg/m2")
| bsd-3-clause | b257603cadfdfa39de71cf7c7d134b9b | 41.265306 | 98 | 0.750845 | 2.490679 | false | false | false | false |
django/django-localflavor | localflavor/id_/forms.py | 2 | 7603 | """ID-specific Form helpers."""
import re
import time
from django.core.exceptions import ImproperlyConfigured
from django.forms import ValidationError
from django.forms.fields import CharField, Select
from django.utils.translation import gettext_lazy as _
postcode_re = re.compile(r'^[1-9]\d{4}$')
plate_re = re.compile(r'^(?P<prefix>[A-Z]{1,2}) ' +
r'(?P<number>\d{1,5})( (?P<suffix>([A-Z]{1,3}|[1-9][0-9]{,2})))?$')
nik_re = re.compile(r'^\d{16}$')
WOMAN_IDENTIFIER = 40
class IDPostCodeField(CharField):
    """
    An Indonesian post code field.
    http://id.wikipedia.org/wiki/Kode_pos
    """
    default_error_messages = {
        'invalid': _('Enter a valid post code'),
    }
    def clean(self, value):
        """Validate a five-digit Indonesian post code, or pass through empties."""
        value = super().clean(value)
        if value in self.empty_values:
            return value
        # Short-circuit order matters: the regex guarantees five digits before
        # int() and value[4] are evaluated.
        invalid = (
            not postcode_re.search(value)
            or int(value) < 10110
            # Codes starting with 1 (1xxx0) must end in 0.
            or (value[0] == '1' and value[4] != '0')
        )
        if invalid:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return '%s' % (value,)
class IDProvinceSelect(Select):
    """A Select widget whose choices are the provinces of Indonesia."""
    def __init__(self, attrs=None):
        # Deferred import keeps the choice data out of memory until the widget
        # is actually instantiated, see also #17275.
        from .id_choices import PROVINCE_CHOICES
        super().__init__(attrs, choices=PROVINCE_CHOICES)
class IDLicensePlatePrefixSelect(Select):
    """
    A Select widget listing Indonesian vehicle license plate prefix codes.
    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
    """
    def __init__(self, attrs=None):
        # Deferred import keeps the choice data out of memory until the widget
        # is actually instantiated, see also #17275.
        from .id_choices import LICENSE_PLATE_PREFIX_CHOICES
        super().__init__(attrs, choices=LICENSE_PLATE_PREFIX_CHOICES)
class IDLicensePlateField(CharField):
    """
    An Indonesian vehicle license plate field.
    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
    Plus: "B 12345 12"

    Plates have the form "<prefix> <number>[ <suffix>]".  All validation
    failures raise the same 'invalid' message; the individual _validate_*
    helpers exist for readability, not for distinct errors.
    """
    default_error_messages = {
        'invalid': _('Enter a valid vehicle license plate number'),
    }
    # Diplomatic-corps prefixes; these plates carry a numeric suffix.
    foreign_vehicles_prefixes = ('CD', 'CC')
    def clean(self, value):
        """Normalize whitespace/case, then run the validator chain."""
        value = super().clean(value)
        if value in self.empty_values:
            return value
        # Collapse runs of whitespace and uppercase before matching.
        plate_number = re.sub(r'\s+', ' ', value).upper()
        number, prefix, suffix = self._validate_regex_match(plate_number)
        self._validate_prefix(prefix)
        self._validate_jakarta(prefix, suffix)
        self._validate_ri(prefix, suffix)
        self._validate_number(number)
        # CD, CC and B 12345 12
        if len(number) == 5 or prefix in self.foreign_vehicles_prefixes:
            # Order matters: the numeric check below must precede the range
            # check, whose int(suffix) would raise on a non-numeric suffix.
            self._validate_numeric_suffix(suffix)
            self._validate_known_codes_range(number, prefix, suffix)
        else:
            self._validate_non_numeric_suffix(suffix)
        return plate_number
    def _validate_regex_match(self, plate_number):
        # Split the plate into (number, prefix, suffix); suffix may be None.
        matches = plate_re.search(plate_number)
        if matches is None:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        prefix = matches.group('prefix')
        suffix = matches.group('suffix')
        number = matches.group('number')
        return number, prefix, suffix
    def _validate_number(self, number):
        # Number can't be zero.
        if number == '0':
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def _validate_known_codes_range(self, number, prefix, suffix):
        # Known codes range is 12-124
        if prefix in self.foreign_vehicles_prefixes and not (12 <= int(number) <= 124):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if len(number) == 5 and not (12 <= int(suffix) <= 124):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def _validate_numeric_suffix(self, suffix):
        # suffix must be numeric and non-empty
        if re.match(r'^\d+$', suffix) is None:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def _validate_non_numeric_suffix(self, suffix):
        # suffix must be non-numeric
        if suffix is not None and re.match(r'^[A-Z]{,3}$', suffix) is None:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def _validate_prefix(self, prefix):
        # Load data in memory only when it is required, see also #17275
        from .id_choices import LICENSE_PLATE_PREFIX_CHOICES
        # Make sure prefix is in the list of known codes.
        if prefix not in [choice[0] for choice in LICENSE_PLATE_PREFIX_CHOICES]:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def _validate_ri(self, prefix, suffix):
        # RI plates don't have suffix.
        if prefix == 'RI' and suffix is not None and suffix != '':
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def _validate_jakarta(self, prefix, suffix):
        # Only Jakarta (prefix B) can have 3 letter suffix.
        if suffix is not None and len(suffix) == 3 and prefix != 'B':
            raise ValidationError(self.error_messages['invalid'], code='invalid')
class IDNationalIdentityNumberField(CharField):
    """
    An Indonesian national identity number (NIK/KTP#) field.
    http://id.wikipedia.org/wiki/Nomor_Induk_Kependudukan
    xx.xxxx.ddmmyy.xxxx - 16 digits (excl. dots)
    notes: for women dd + 40
    """
    default_error_messages = {
        'invalid': _('Enter a valid NIK/KTP number'),
    }
    def clean(self, value):
        """Validate a 16-digit NIK and return it formatted with dots."""
        value = super().clean(value)
        if value in self.empty_values:
            return value
        # This replacement effectively means the value is always stripped.
        value = re.sub(r'[\s.]', '', value)
        if not nik_re.search(value):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if int(value) == 0:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        # Digits 7-12 encode the birth date as ddmmyy.
        year = int(value[10:12])
        month = int(value[8:10])
        day = int(value[6:8])
        # for woman, birth date is added with 40
        if day > 31:
            day -= WOMAN_IDENTIFIER
        current_year = time.localtime().tm_year
        # Century pivot: two-digit years below the current year's last two
        # digits are treated as 20xx, otherwise 19xx.
        # NOTE(review): this makes validity depend on the current date —
        # confirm the intended pivot behavior near century boundaries.
        if year < int(str(current_year)[-2:]):
            if not IDNationalIdentityNumberField._valid_nik_date(2000 + int(year), month, day):
                raise ValidationError(self.error_messages['invalid'], code='invalid')
        elif not IDNationalIdentityNumberField._valid_nik_date(1900 + int(year), month, day):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        # Region (first 6) and serial (last 4) digits may not be all zeros.
        if value[:6] == '000000' or value[12:] == '0000':
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return '%s.%s.%s.%s' % (value[:2], value[2:6], value[6:12], value[12:])
    @staticmethod
    def _valid_nik_date(year, month, day):
        # A date is valid when it survives a mktime/localtime round trip
        # unchanged (e.g. Feb 30 would normalize to a different date).
        try:
            t1 = (int(year), int(month), int(day), 0, 0, 0, 0, 0, -1)
            d = time.mktime(t1)
            t2 = time.localtime(d)
            if t1[:3] != t2[:3]:
                return False
            else:
                return True
        except (OverflowError, ValueError):
            return False
| bsd-3-clause | effff0f1f63004cec9187e7783c424aa | 34.863208 | 102 | 0.615546 | 3.795806 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/identifier.py | 1 | 2591 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Identifier) on 2017-03-22.
# 2017, SMART Health IT.
from . import element
class Identifier(element.Element):
    """ An identifier intended for computation.
    A technical identifier - identifies some entity uniquely and unambiguously.
    """
    resource_type = "Identifier"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Organization that issued the id (may be just text); `FHIRReference`
        # to an Organization (represented as `dict` in JSON).
        self.assigner = None
        # Time period when the id is/was valid for use. (`Period`)
        self.period = None
        # The namespace for the identifier value. (`str`)
        self.system = None
        # Description of identifier. (`CodeableConcept`)
        self.type = None
        # usual | official | temp | secondary (if known). (`str`)
        self.use = None
        # The value that is unique. (`str`)
        self.value = None
        super(Identifier, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """Return property definitions, including Identifier's own fields."""
        properties = super(Identifier, self).elementProperties()
        properties += [
            ("assigner", "assigner", fhirreference.FHIRReference, False, None, False),
            ("period", "period", period.Period, False, None, False),
            ("system", "system", str, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
            ("use", "use", str, False, None, False),
            ("value", "value", str, False, None, False),
        ]
        return properties
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| bsd-3-clause | b96d9e226c4a73597e982f21538b06e8 | 32.649351 | 102 | 0.598997 | 4.268534 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/fhirabstractresource.py | 1 | 6973 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Base class for FHIR resources.
# 2014, SMART Health IT.
from . import fhirabstractbase
class FHIRAbstractResource(fhirabstractbase.FHIRAbstractBase):
    """ Extends the FHIRAbstractBase with server talking capabilities.
    """
    resource_name = 'FHIRAbstractResource'
    def __init__(self, jsondict=None, strict=True):
        # The server the instance was read from (resolved lazily by `server`).
        self._server = None
        # The remote id this instance was read with.  Previously this was only
        # assigned inside `read()`, so `search()` raised AttributeError on any
        # instance created another way; initialize it here so it always exists.
        self._local_id = None
        # raise if "resourceType" does not match
        if jsondict is not None and 'resourceType' in jsondict \
            and jsondict['resourceType'] != self.resource_name:
            raise Exception("Attempting to instantiate {} with resource data that defines a resourceType of \"{}\""
                .format(self.__class__, jsondict['resourceType']))
        super(FHIRAbstractResource, self).__init__(jsondict=jsondict, strict=strict)
    @classmethod
    def _with_json_dict(cls, jsondict):
        """ Overridden to use a factory if called when "resourceType" is
        defined in the JSON but does not match the receiver's resource_name.
        """
        if not isinstance(jsondict, dict):
            raise Exception("Cannot use this method with anything but a JSON dictionary, got {}"
                .format(jsondict))
        res_type = jsondict.get('resourceType')
        if res_type and res_type != cls.resource_name:
            return fhirelementfactory.FHIRElementFactory.instantiate(res_type, jsondict)
        return super(FHIRAbstractResource, cls)._with_json_dict(jsondict)
    def as_json(self):
        """ Serialize to JSON, adding the mandatory "resourceType" key. """
        js = super(FHIRAbstractResource, self).as_json()
        js['resourceType'] = self.resource_name
        return js
    # MARK: Handling Paths
    def relativeBase(self):
        """ The base REST path for this resource type, e.g. "Patient". """
        return self.__class__.resource_name
    def relativePath(self):
        """ The REST path to this instance, e.g. "Patient/123". """
        return "{}/{}".format(self.relativeBase(), self.id)
    # MARK: Server Connection
    @property
    def server(self):
        """ Walks the owner hierarchy until it finds an owner with a server.
        """
        if self._server is None:
            owningRes = self.owningResource()
            self._server = owningRes.server if owningRes is not None else None
        return self._server
    @classmethod
    def read(cls, rem_id, server):
        """ Read the resource with the given id from the given server. The
        passed-in server instance must support a `request_json()` method call,
        taking a relative path as first (and only mandatory) argument.
        :param str rem_id: The id of the resource on the remote server
        :param FHIRServer server: An instance of a FHIR server or compatible class
        :returns: An instance of the receiving class
        """
        if not rem_id:
            raise Exception("Cannot read resource without remote id")
        path = '{}/{}'.format(cls.resource_name, rem_id)
        instance = cls.read_from(path, server)
        instance._local_id = rem_id
        return instance
    @classmethod
    def read_from(cls, path, server):
        """ Requests data from the given REST path on the server and creates
        an instance of the receiving class.
        :param str path: The REST path to read from
        :param FHIRServer server: An instance of a FHIR server or compatible class
        :returns: An instance of the receiving class
        """
        if not path:
            raise Exception("Cannot read resource without REST path")
        if server is None:
            raise Exception("Cannot read resource without server instance")
        ret = server.request_json(path)
        instance = cls(jsondict=ret)
        instance._server = server
        return instance
    def create(self, server):
        """ Attempt to create the receiver on the given server, using a POST
        command.
        :param FHIRServer server: The server to create the receiver on
        :returns: None or the response JSON on success
        """
        srv = server or self.server
        if srv is None:
            raise Exception("Cannot create a resource without a server")
        if self.id:
            raise Exception("This resource already has an id, cannot create")
        ret = srv.post_json(self.relativePath(), self.as_json())
        # Some servers return an empty body on success.
        if len(ret.text) > 0:
            return ret.json()
        return None
    def update(self, server=None):
        """ Update the receiver's representation on the given server, issuing
        a PUT command.
        :param FHIRServer server: The server to update the receiver on;
            optional, will use the instance's `server` if needed.
        :returns: None or the response JSON on success
        """
        srv = server or self.server
        if srv is None:
            raise Exception("Cannot update a resource that does not have a server")
        if not self.id:
            raise Exception("Cannot update a resource that does not have an id")
        ret = srv.put_json(self.relativePath(), self.as_json())
        if len(ret.text) > 0:
            return ret.json()
        return None
    def delete(self):
        """ Delete the receiver from the given server with a DELETE command.
        :returns: None or the response JSON on success
        """
        if self.server is None:
            raise Exception("Cannot delete a resource that does not have a server")
        if not self.id:
            raise Exception("Cannot delete a resource that does not have an id")
        ret = self.server.delete_json(self.relativePath())
        if len(ret.text) > 0:
            return ret.json()
        return None
    # MARK: Search
    def search(self, struct=None):
        """ Search can be started via a dictionary containing a search
        construct.
        Calling this method with a search struct will return a `FHIRSearch`
        object representing the search struct, with "$type" and "id" added.
        :param dict struct: An optional search structure
        :returns: A FHIRSearch instance
        """
        if struct is None:
            struct = {'$type': self.__class__.resource_name}
        # _local_id is always defined now (set in __init__, refined by read()).
        if self._local_id is not None or self.id is not None:
            struct['id'] = self._local_id or self.id
        return self.__class__.where(struct)
    @classmethod
    def where(cls, struct):
        """ Search can be started via a dictionary containing a search
        construct.
        Calling this method with a search struct will return a `FHIRSearch`
        object representing the search struct
        :param dict struct: A search structure
        :returns: A FHIRSearch instance
        """
        return fhirsearch.FHIRSearch(cls, struct)
from . import fhirsearch
from . import fhirelementfactory
| bsd-3-clause | 8bec6c97950e31a248d050999b1386dd | 35.507853 | 115 | 0.60849 | 4.504522 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/offline/biobank_samples_pipeline.py | 1 | 39087 | """Reads a CSV that Biobank uploads to GCS and upserts to the BiobankStoredSample table.
Also updates ParticipantSummary data related to samples.
"""
import csv
import datetime
from dateutil.parser import parse
import logging
import math
import os
import pytz
from sqlalchemy import case
from sqlalchemy.orm import aliased, Query
from sqlalchemy.sql import func, or_
from typing import Dict
from rdr_service import clock, config
from rdr_service.api_util import list_blobs, open_cloud_file
from rdr_service.code_constants import PPI_SYSTEM, RACE_AIAN_CODE, RACE_QUESTION_CODE, WITHDRAWAL_CEREMONY_YES,\
WITHDRAWAL_CEREMONY_NO, WITHDRAWAL_CEREMONY_QUESTION_CODE
from rdr_service.config import BIOBANK_SAMPLES_DAILY_INVENTORY_FILE_PATTERN,\
BIOBANK_SAMPLES_MONTHLY_INVENTORY_FILE_PATTERN
from rdr_service.dao.biobank_stored_sample_dao import BiobankStoredSampleDao
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.database_utils import MYSQL_ISO_DATE_FORMAT, parse_datetime, replace_isodate
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.code import Code
from rdr_service.model.config_utils import from_client_biobank_id, get_biobank_id_prefix
from rdr_service.model.hpo import HPO
from rdr_service.model.organization import Organization
from rdr_service.model.participant import Participant
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.model.questionnaire import QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer
from rdr_service.model.site import Site
from rdr_service.offline.bigquery_sync import dispatch_participant_rebuild_tasks
from rdr_service.offline.sql_exporter import SqlExporter
from rdr_service.participant_enums import BiobankOrderStatus, OrganizationType, get_sample_status_enum_value,\
WithdrawalStatus
# Format for dates in output filenames for the reconciliation report.
_FILENAME_DATE_FORMAT = "%Y-%m-%d"
# The output of the reconciliation report goes into this subdirectory within the upload bucket.
_REPORT_SUBDIR = "reconciliation"
_GENOMIC_SUBDIR_PREFIX = "genomic_water_line_test"
# Number of BiobankStoredSample rows upserted per DAO call during CSV import.
_BATCH_SIZE = 1000
# Biobank provides timestamps without time zone info, which should be in central time (see DA-235).
_INPUT_TIMESTAMP_FORMAT = "%Y/%m/%d %H:%M:%S"  # like 2016/11/30 14:32:18
_US_CENTRAL = pytz.timezone("US/Central")
# The timestamp found at the end of input CSV files.
INPUT_CSV_TIME_FORMAT = "%Y-%m-%d-%H-%M-%S"
# NOTE(review): a timestamp rendered with INPUT_CSV_TIME_FORMAT is actually 19 characters
# ("2016-11-30-14-32-18"); the extra character is compensated for by the "- 1" in
# _timestamp_from_filename's slice arithmetic — confirm before changing either.
_INPUT_CSV_TIME_FORMAT_LENGTH = 18
_CSV_SUFFIX_LENGTH = 4  # len(".csv")
_THIRTY_SIX_HOURS_AGO = datetime.timedelta(hours=36)
# Maximum allowed age of the newest inventory CSV before the import refuses to run.
_MAX_INPUT_AGE = datetime.timedelta(hours=24)
# Identifier "system" values used on biobank_order_identifier rows.
_PMI_OPS_SYSTEM = "https://www.pmi-ops.org"
_CE_QUEST_SYSTEM = "http://careevolution.com/CareTask"
_KIT_ID_SYSTEM = "https://orders.mayomedicallaboratories.com/kit-id"
_TRACKING_NUMBER_SYSTEM = "https://orders.mayomedicallaboratories.com/tracking-number"
class DataError(RuntimeError):
    """Bad sample data encountered during import.

    Args:
      msg: Passed through to the RuntimeError superclass.
      external: If True, this error should be reported to external partners (Biobank).
        Externally reported DataErrors are only reported if biobank recipients are in
        the config.
    """

    def __init__(self, msg, external=False):
        super().__init__(msg)
        self.external = external
def upsert_from_latest_csv():
    """Imports the most recent Biobank inventory CSV from GCS.

    Locates the latest daily inventory file, refuses files older than
    _MAX_INPUT_AGE, upserts each row as a BiobankStoredSample, then refreshes
    the affected participant summaries and queues BigQuery rebuild tasks.

    Returns:
      A tuple of (number of samples written, UTC timestamp parsed from the filename).
    Raises:
      DataError: if the newest CSV is more than 24 hours old (reported externally).
    """
    csv_file_path, csv_filename, timestamp = get_last_biobank_sample_file_info()
    now = clock.CLOCK.now()
    if now - timestamp > _MAX_INPUT_AGE:
        raise DataError(
            "Input %r (timestamp %s UTC) is > 24h old (relative to %s UTC), not importing."
            % (csv_filename, timestamp, now),
            external=True,  # a stale upload is Biobank's problem, so notify them
        )
    with open_cloud_file(csv_file_path) as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter="\t")
        written, biobank_ids = _upsert_samples_from_csv(csv_reader)
    # Capture the time *before* the summary update so the BigQuery sync below
    # picks up every row that update touches.
    since_ts = clock.CLOCK.now()
    dao = ParticipantSummaryDao()
    dao.update_from_biobank_stored_samples(biobank_ids=biobank_ids)
    update_bigquery_sync_participants(since_ts, dao)
    return written, timestamp
def update_bigquery_sync_participants(ts, dao):
    """
    Queue BigQuery rebuild tasks for all participants modified by the biobank reconciliation process.

    :param ts: Timestamp; participants whose summary was modified after this point are rebuilt.
    :param dao: DAO object providing a `session()` context manager.
    """
    batch_size = 250
    with dao.session() as session:
        participants = session.query(ParticipantSummary.participantId) \
            .filter(ParticipantSummary.lastModified > ts).all()

        total_rows = len(participants)
        # math.ceil returns an int in Python 3, so the float()/int() round-trips the
        # original code did are unnecessary. `count` is informational only: the
        # dispatch helper below does its own batching.
        count = math.ceil(total_rows / batch_size)
        logging.info('Biobank: calculated {0} tasks from {1} records with a batch size of {2}.'.
                     format(count, total_rows, batch_size))

        pids = [participant.participantId for participant in participants]
        dispatch_participant_rebuild_tasks(pids, batch_size=batch_size)
def get_last_biobank_sample_file_info(monthly=False):
    """Finds the latest inventory CSV and returns (path, filename, file timestamp in UTC)."""
    bucket = config.getSetting(config.BIOBANK_SAMPLES_BUCKET_NAME)  # raises if missing
    if monthly:
        # Monthly (60-day) manifests live in a dedicated subdirectory.
        bucket += "/60_day_manifests"
    path, file_name = _open_latest_samples_file(bucket, monthly=monthly)
    return path, file_name, _timestamp_from_filename(file_name)
def _timestamp_from_filename(csv_filename):
    """Extracts the timestamp embedded at the end of an inventory CSV filename.

    The parsed time is interpreted as US/Central and returned as a naive UTC datetime.
    Raises DataError when the filename is too short or the suffix does not parse.
    """
    suffix_length = _INPUT_CSV_TIME_FORMAT_LENGTH + _CSV_SUFFIX_LENGTH
    if len(csv_filename) < suffix_length:
        raise DataError("Can't parse time from CSV filename: %s" % csv_filename)
    # The extra leading character compensates for INPUT_CSV_TIME_FORMAT rendering to 19
    # characters while _INPUT_CSV_TIME_FORMAT_LENGTH is 18.
    time_suffix = csv_filename[-(suffix_length + 1):-_CSV_SUFFIX_LENGTH]
    try:
        naive = datetime.datetime.strptime(time_suffix, INPUT_CSV_TIME_FORMAT)
    except ValueError:
        raise DataError("Can't parse time from CSV filename: %s" % csv_filename)
    # Assume file times are in Central time (CST or CDT); convert to UTC.
    return _US_CENTRAL.localize(naive).astimezone(pytz.utc).replace(tzinfo=None)
def _open_latest_samples_file(cloud_bucket_name, monthly=False):
    """Returns (path, filename) for the most recently created inventory CSV in the bucket."""
    blob_name = _find_latest_samples_csv(cloud_bucket_name, monthly)
    file_name = os.path.basename(blob_name)
    full_path = os.path.normpath(os.path.join(cloud_bucket_name, blob_name))
    logging.info(f'Using CSV from {cloud_bucket_name} as latest samples file: {file_name}')
    return full_path, file_name
def _find_latest_samples_csv(cloud_bucket_name, monthly=False):
    """Returns the blob name of the most recently updated inventory CSV in the bucket.

    (The caller joins this name with the bucket to build the full path.)

    Raises:
      DataError: if the bucket is empty or contains no CSVs matching the
        configured inventory filename pattern.
    """
    if monthly:
        file_name_pattern = config.getSettingJson(BIOBANK_SAMPLES_MONTHLY_INVENTORY_FILE_PATTERN)
    else:
        file_name_pattern = config.getSettingJson(BIOBANK_SAMPLES_DAILY_INVENTORY_FILE_PATTERN)
    bucket_stat_list = list_blobs(cloud_bucket_name)
    if not bucket_stat_list:
        raise DataError("No files in cloud bucket %r." % cloud_bucket_name)
    # GCS does not really have the concept of directories (it's just a filename convention), so all
    # directory listings are recursive and we must filter out subdirectory contents.
    matching_csvs = [s for s in bucket_stat_list
                     if s.name.lower().endswith(".csv")
                     and file_name_pattern in s.name]
    if not matching_csvs:
        # Report the full bucket contents, not the (empty) filtered list, so the
        # error actually shows what was found. The original message printed the
        # filtered list, which was always [] here.
        raise DataError("No CSVs in cloud bucket %r (all files: %s)." % (cloud_bucket_name, bucket_stat_list))
    matching_csvs.sort(key=lambda s: s.updated)
    return matching_csvs[-1].name
class CsvColumns(object):
    """Names of CSV columns that we read from the Biobank samples upload."""

    SAMPLE_ID = "Sample Id"
    PARENT_ID = "Parent Sample Id"  # non-empty for child rows, which the import skips
    CONFIRMED_DATE = "Sample Confirmed Date"
    EXTERNAL_PARTICIPANT_ID = "External Participant Id"  # environment-prefixed biobank ID
    BIOBANK_ORDER_IDENTIFIER = "Sent Order Id"
    TEST_CODE = "Test Code"
    CREATE_DATE = "Sample Family Create Date"
    STATUS = "Sample Disposal Status"
    DISPOSAL_DATE = "Sample Disposed Date"
    SAMPLE_FAMILY = "Sample Family Id"

    # Note: Please ensure changes to the CSV format are reflected in fake_biobanks_sample_generator.
    # All required columns; _upsert_samples_from_csv fails fast if any are missing from the header.
    ALL = (
        SAMPLE_ID,
        PARENT_ID,
        CONFIRMED_DATE,
        EXTERNAL_PARTICIPANT_ID,
        BIOBANK_ORDER_IDENTIFIER,
        TEST_CODE,
        CREATE_DATE,
        STATUS,
        DISPOSAL_DATE,
        SAMPLE_FAMILY,
    )
def _upsert_samples_from_csv(csv_reader):
    """Inserts/updates BiobankStoredSamples from a csv.DictReader.

    Rows for other environments, child samples, and unknown biobank IDs are
    skipped. Upserts are batched (_BATCH_SIZE rows per DAO call) to bound memory.

    Returns:
      A tuple of (number of samples written, set of affected biobank IDs).
    Raises:
      DataError: if required columns are missing or a row fails to convert.
    """
    missing_cols = set(CsvColumns.ALL) - set(csv_reader.fieldnames)
    if missing_cols:
        raise DataError("CSV is missing columns %s, had columns %s." % (missing_cols, csv_reader.fieldnames))
    samples_dao = BiobankStoredSampleDao()
    biobank_id_prefix = get_biobank_id_prefix()
    biobank_ids = set()
    written = 0
    try:
        samples = []
        with ParticipantDao().session() as session:
            for row in csv_reader:
                sample = _create_sample_from_row(row, biobank_id_prefix)
                if sample:
                    # DA-601 - Ensure biobank_id exists before accepting a sample record.
                    if session.query(Participant).filter(Participant.biobankId == sample.biobankId).count() < 1:
                        logging.error(
                            "Bio bank Id ({0}) does not exist in the Participant table.".format(sample.biobankId)
                        )
                        continue
                    samples.append(sample)
                    if len(samples) >= _BATCH_SIZE:
                        written += samples_dao.upsert_all(samples)
                        samples = []
                    biobank_ids.add(sample.biobankId)
            if samples:
                written += samples_dao.upsert_all(samples)
        return written, biobank_ids
    except ValueError as e:
        # Chain the original exception so the root cause isn't lost in logs
        # (the previous bare re-raise discarded it).
        raise DataError("Error upserting samples from CSV: %s" % e) from e
def _parse_timestamp(row, key, sample):
    """Parses an optional US/Central timestamp column into a naive UTC datetime.

    Returns None when the column is empty; raises DataError on malformed values.
    """
    raw_value = row[key]
    if not raw_value:
        return None
    try:
        naive = datetime.datetime.strptime(raw_value, _INPUT_TIMESTAMP_FORMAT)
    except ValueError:
        raise DataError(
            "Sample %r for %r has bad timestamp %r"
            % (sample.biobankStoredSampleId, sample.biobankId, raw_value)
        )
    # Assume incoming times are in Central time (CST or CDT). Convert to UTC for storage, but drop
    # tzinfo since storage is naive anyway (to make stored/fetched values consistent).
    return _US_CENTRAL.localize(naive).astimezone(pytz.utc).replace(tzinfo=None)
def _create_sample_from_row(row, biobank_id_prefix):
    """Creates a new BiobankStoredSample object from a CSV row.

    Raises:
      DataError: if the row has a malformed timestamp.
    Returns:
      A new BiobankStoredSample, or None if the row should be skipped.
    """
    biobank_id_str = row[CsvColumns.EXTERNAL_PARTICIPANT_ID]
    if not biobank_id_str.startswith(biobank_id_prefix):
        # This is a biobank sample for another environment. Ignore it.
        return None
    if CsvColumns.BIOBANK_ORDER_IDENTIFIER not in row:
        return None
    if row[CsvColumns.PARENT_ID]:
        # Skip child samples (rows that reference a parent sample). Checked
        # before building the model so we don't construct and discard objects.
        return None
    sample = BiobankStoredSample(
        biobankStoredSampleId=row[CsvColumns.SAMPLE_ID],
        biobankId=from_client_biobank_id(biobank_id_str),
        biobankOrderIdentifier=row[CsvColumns.BIOBANK_ORDER_IDENTIFIER],
        test=row[CsvColumns.TEST_CODE],
    )
    sample.confirmed = _parse_timestamp(row, CsvColumns.CONFIRMED_DATE, sample)
    sample.created = _parse_timestamp(row, CsvColumns.CREATE_DATE, sample)
    sample.status = get_sample_status_enum_value(row[CsvColumns.STATUS])
    sample.disposed = _parse_timestamp(row, CsvColumns.DISPOSAL_DATE, sample)
    # NOTE(review): snake_case 'family_id' is inconsistent with the camelCase
    # attributes above — confirm it matches the BiobankStoredSample model attribute.
    sample.family_id = row[CsvColumns.SAMPLE_FAMILY]
    return sample
def write_reconciliation_report(now, report_type="daily"):
    """Writes order/sample reconciliation reports to GCS."""
    bucket_name = config.getSetting(config.BIOBANK_SAMPLES_BUCKET_NAME)  # raises if missing
    exporter = SqlExporter(bucket_name)
    report_paths = _get_report_paths(now, report_type)
    _query_and_write_reports(exporter, now, report_type, *report_paths)
def _get_report_paths(report_datetime, report_type="daily"):
    """Returns the output report paths for the given date.

    Daily: (received, missing, modified, salivary_missing).
    Monthly: (received_monthly, missing_monthly, modified_monthly).

    (The previous docstring listed "late" and "withdrawals" reports that are
    not produced here, and omitted "modified".)
    """
    report_name_suffix = ("received", "missing", "modified", "salivary_missing")
    if report_type == "monthly":
        report_name_suffix = ("received_monthly", "missing_monthly", "modified_monthly")
    return [_get_report_path(report_datetime, report_name) for report_name in report_name_suffix]
def _get_report_path(report_datetime, report_name):
    """Builds the bucket-relative CSV path for a named report on the given date."""
    date_str = report_datetime.strftime(_FILENAME_DATE_FORMAT)
    return '{}/report_{}_{}.csv'.format(_REPORT_SUBDIR, date_str, report_name)
def get_withdrawal_report_query(start_date: datetime):
    """
    Generates a report on participants that have withdrawn in the past n days and have samples collected,
    including their biobank ID, withdrawal time, their origin, and whether they are Native American
    (as biobank samples for Native Americans are disposed of differently)
    """
    # Latest active answer to the disposal-ceremony question, if the participant gave one.
    ceremony_answer_subquery = _participant_answer_subquery(WITHDRAWAL_CEREMONY_QUESTION_CODE)
    return (
        Query([
            Participant.participantId.label('participant_id'),
            Participant.biobankId.label('biobank_id'),
            func.date_format(Participant.withdrawalTime, MYSQL_ISO_DATE_FORMAT).label('withdrawal_time'),
            # 'Y' iff the participant has an active AIAN race answer.
            case([(_participant_has_answer(RACE_QUESTION_CODE, RACE_AIAN_CODE), 'Y')], else_='N')
            .label('is_native_american'),
            # Explicit ceremony answer wins ('Y'/'N'); otherwise 'U' (unknown) for AIAN
            # participants who never answered, 'NA' for everyone else.
            case([
                (ceremony_answer_subquery.c.value == WITHDRAWAL_CEREMONY_YES, 'Y'),
                (ceremony_answer_subquery.c.value == WITHDRAWAL_CEREMONY_NO, 'N'),
            ], else_=(
                case([(_participant_has_answer(RACE_QUESTION_CODE, RACE_AIAN_CODE), 'U')], else_='NA')
            )
            ).label('needs_disposal_ceremony'),
            Participant.participantOrigin.label('participant_origin'),
            HPO.name.label('paired_hpo'),
            Organization.externalId.label('paired_org'),
            Site.googleGroup.label('paired_site'),
            Participant.withdrawalReasonJustification.label('withdrawal_reason_justification'),
            ParticipantSummary.deceasedStatus.label('deceased_status')
        ])
        .select_from(Participant)
        .outerjoin(ceremony_answer_subquery, ceremony_answer_subquery.c.participant_id == Participant.participantId)
        # Inner join: only participants with at least one stored sample appear in the report.
        .join(BiobankStoredSample, BiobankStoredSample.biobankId == Participant.biobankId)
        .outerjoin(ParticipantSummary, ParticipantSummary.participantId == Participant.participantId)
        .outerjoin(HPO, HPO.hpoId == Participant.hpoId)
        .outerjoin(Organization, Organization.organizationId == Participant.organizationId)
        .outerjoin(Site, Site.siteId == Participant.siteId)
        .filter(
            Participant.withdrawalStatus != WithdrawalStatus.NOT_WITHDRAWN,
            # Keep rows where either the withdrawal or a stored sample is recent.
            or_(
                Participant.withdrawalTime > start_date,
                BiobankStoredSample.created > start_date
            )
        ).distinct()
    )
def _build_query_params(start_date: datetime):
    """Assembles the bind parameters shared by the reconciliation report queries."""
    code_dao = CodeDao()
    return {
        "race_question_code_id": code_dao.get_code(PPI_SYSTEM, RACE_QUESTION_CODE).codeId,
        "native_american_race_code_id": code_dao.get_code(PPI_SYSTEM, RACE_AIAN_CODE).codeId,
        "biobank_id_prefix": get_biobank_id_prefix(),
        "pmi_ops_system": _PMI_OPS_SYSTEM,
        "ce_quest_system": _CE_QUEST_SYSTEM,
        "kit_id_system": _KIT_ID_SYSTEM,
        "tracking_number_system": _TRACKING_NUMBER_SYSTEM,
        "n_days_ago": start_date,
        "dv_order_filter": 0
    }
def _query_and_write_received_report(exporter, report_path, query_params, report_predicate):
    """Exports the 'received' reconciliation report to the given bucket path."""
    select_sql = _RECONCILIATION_REPORT_SELECTS_SQL
    if config.getSettingJson(config.ENABLE_BIOBANK_MANIFEST_RECEIVED_FLAG, default=False):
        # Optionally expose the NY-state and sex-at-birth flags in the manifest.
        select_sql += """,
      group_concat(ny_flag) ny_flag,
      group_concat(sex_at_birth_flag) sex_at_birth_flag
    """
    logging.info(f"Writing {report_path} report.")
    exporter.run_export(
        report_path,
        replace_isodate(select_sql + _RECONCILIATION_REPORT_SOURCE_SQL),
        query_params,
        backup=True,
        predicate=report_predicate
    )
    logging.info(f"Completed {report_path} report.")
def _query_and_write_reports(exporter, now: datetime, report_type, path_received,
                             path_missing, path_modified, path_salivary_missing=None):
    """Runs the reconciliation MySQL queries and writes result rows to the given CSV writers.

    Note that due to syntax differences, the query runs on MySQL only (not SQLite in unit tests).

    The predicates below index raw result tuples using the _*_INDEX constants,
    which must stay in sync with the column order of _RECONCILIATION_REPORT_SELECTS_SQL.
    """
    # Daily reports look back 10 days; monthly reports look back 60.
    report_cover_range = 10
    if report_type == "monthly":
        report_cover_range = 60

    # Gets all sample/order pairs where everything arrived, within the past n days.
    received_predicate = lambda result: (
        result[_RECEIVED_TEST_INDEX]
        and result[_SENT_COUNT_INDEX] <= result[_RECEIVED_COUNT_INDEX]
        and in_past_n_days(result, now, report_cover_range)
    )

    # Gets samples or orders where something has gone missing within the past n days, and if an order
    # was placed, it was placed at least 36 hours ago (gives the sample time to arrive).
    missing_predicate = lambda result: (
        (
            result[_SENT_COUNT_INDEX] != result[_RECEIVED_COUNT_INDEX]
            or (result[_SENT_FINALIZED_INDEX] and not result[_RECEIVED_TEST_INDEX])
        )
        and in_past_n_days(result, now, report_cover_range, ordered_before=now - _THIRTY_SIX_HOURS_AGO)
        and result[_EDITED_CANCELLED_RESTORED_STATUS_FLAG_INDEX] != 'cancelled'
    )

    # Gets samples or orders where something has modified within the past n days.
    modified_predicate = lambda result: (
        result[_EDITED_CANCELLED_RESTORED_STATUS_FLAG_INDEX] and in_past_n_days(result, now, report_cover_range)
    )

    # break into three steps to avoid OOM issue
    report_paths = [path_missing, path_modified]
    report_predicates = [missing_predicate, modified_predicate]

    query_params = _build_query_params(start_date=now - datetime.timedelta(days=(report_cover_range + 1)))
    _query_and_write_received_report(exporter, path_received, query_params, received_predicate)

    for report_path, report_predicate in zip(report_paths, report_predicates):
        if report_path == path_missing:
            # The missing report excludes mail-kit (DV) orders; see :dv_order_filter in the SQL.
            query_params['dv_order_filter'] = 1
        logging.info(f"Writing {report_path} report.")
        exporter.run_export(
            report_path,
            replace_isodate(_RECONCILIATION_REPORT_SELECTS_SQL + _RECONCILIATION_REPORT_SOURCE_SQL),
            query_params,
            backup=True,
            predicate=report_predicate
        )
        logging.info(f"Completed {report_path} report.")

    # Check if cumulative received report should be generated
    # biobank_cumulative_received_schedule should be a dictionary with keys giving when the report should
    # run, and the values giving the dates that should be used for the first start date.
    cumulative_received_schedule: Dict[str, str] = config.getSettingJson(
        config.BIOBANK_CUMULATIVE_RECEIVED_SCHEDULE,
        default={}
    )
    for run_date, start_date in cumulative_received_schedule.items():
        if parse(run_date).date() == now.date():
            report_start_date = parse(start_date)
            cumulative_received_params = _build_query_params(start_date=report_start_date)
            _query_and_write_received_report(
                exporter=exporter,
                report_path=_get_report_path(report_datetime=now, report_name='cumulative_received'),
                query_params=cumulative_received_params,
                report_predicate=received_predicate
            )

    # Generate the missing salivary report, within last n days
    if report_type != "monthly" and path_salivary_missing is not None:
        missing_report_day_interval = config.getSettingJson(
            config.BIOBANK_MISSING_REPORT_DAY_INTERVAL,
            default=report_cover_range
        )
        exporter.run_export(
            path_salivary_missing,
            _SALIVARY_MISSING_REPORT_SQL,
            {
                "biobank_id_prefix": get_biobank_id_prefix(),
                "n_days_interval": missing_report_day_interval,
            },
            backup=True,
        )
    # NOTE(review): this logs "monthly" unconditionally, even on daily runs —
    # the message text appears to predate the daily report path; confirm intent.
    logging.info("Completed monthly reconciliation report.")
# Column indexes into rows produced by the reconciliation SQL below; the report
# predicates in _query_and_write_reports index raw result tuples with these.
# Keep them in sync with the column order of _RECONCILIATION_REPORT_SELECTS_SQL.
_SENT_COUNT_INDEX = 2
_SENT_COLLECTION_TIME_INDEX = 4
_SENT_FINALIZED_INDEX = 6
_RECEIVED_TEST_INDEX = 16
_RECEIVED_COUNT_INDEX = 17
# TODO: remove received time once Biobank stops using it (DA-374)
_RECEIVED_TIME_INDEX = 19
_SAMPLE_FAMILY_CREATE_TIME_INDEX = 20
_ELAPSED_HOURS_INDEX = 21
_EDITED_CANCELLED_RESTORED_STATUS_FLAG_INDEX = 28
_ORDER_JOINS = """
biobank_order
INNER JOIN
participant
ON
biobank_order.participant_id = participant.participant_id
INNER JOIN
biobank_order_identifier
ON biobank_order.biobank_order_id = biobank_order_identifier.biobank_order_id
AND biobank_order_identifier.system in (:pmi_ops_system, :ce_quest_system)
INNER JOIN
biobank_ordered_sample
ON
biobank_order.biobank_order_id = biobank_ordered_sample.order_id
LEFT OUTER JOIN
site source_site
ON biobank_order.source_site_id = source_site.site_id
LEFT OUTER JOIN
hpo source_site_hpo
ON source_site.hpo_id = source_site_hpo.hpo_id
LEFT OUTER JOIN
site finalized_site
ON biobank_order.finalized_site_id = finalized_site.site_id
LEFT OUTER JOIN
site collected_site
ON biobank_order.collected_site_id = collected_site.site_id
LEFT OUTER JOIN
hpo finalized_site_hpo
ON finalized_site.hpo_id = finalized_site_hpo.hpo_id
LEFT OUTER JOIN
site restored_site
ON biobank_order.restored_site_id = restored_site.site_id
LEFT OUTER JOIN
site amended_site
ON biobank_order.amended_site_id = amended_site.site_id
LEFT OUTER JOIN
site cancelled_site
ON biobank_order.cancelled_site_id = cancelled_site.site_id
"""
_STORED_SAMPLE_JOIN_CRITERIA = """
biobank_stored_sample.biobank_id = participant.biobank_id
AND biobank_stored_sample.test = biobank_ordered_sample.test
AND biobank_stored_sample.biobank_order_identifier = CASE
WHEN biobank_order_identifier.system = :ce_quest_system THEN kit_id_identifier.value
ELSE biobank_order_identifier.value
END
AND biobank_ordered_sample.finalized IS NOT NULL
AND biobank_stored_sample.confirmed IS NOT NULL
"""
def _get_hpo_type_sql(hpo_alias):
    """Builds a SQL CASE expression translating <alias>.organization_type to its enum name."""
    branches = "".join(
        "WHEN %s.organization_type = %d THEN '%s' " % (hpo_alias, org_type.number, org_type.name)
        for org_type in OrganizationType
    )
    return "(CASE " + branches + "ELSE 'UNKNOWN' END)"
def _get_status_flag_sql():
    """Builds SQL CASE columns describing whether/how an order was edited, cancelled, or restored.

    Produces five derived columns (status flag, username, site name, time, reason), each
    keyed off biobank_order.order_status. A status of UNSET combined with a non-NULL
    restored_time is interpreted as a restored order.
    """
    result = """
     CASE
      WHEN biobank_order.order_status = {amended} THEN 'edited'
      WHEN biobank_order.order_status = {cancelled} THEN 'cancelled'
      WHEN biobank_order.order_status = {unset} AND biobank_order.restored_time IS NOT NULL
        THEN 'restored'
      ELSE NULL
     END edited_cancelled_restored_status_flag,
     CASE
      WHEN biobank_order.order_status = {amended} THEN biobank_order.amended_username
      WHEN biobank_order.order_status = {cancelled} THEN biobank_order.cancelled_username
      WHEN biobank_order.order_status = {unset} AND biobank_order.restored_time IS NOT NULL
        THEN biobank_order.restored_username
      ELSE NULL
     END edited_cancelled_restored_name,
     CASE
      WHEN biobank_order.order_status = {amended} THEN amended_site.site_name
      WHEN biobank_order.order_status = {cancelled} THEN cancelled_site.site_name
      WHEN biobank_order.order_status = {unset} AND biobank_order.restored_time IS NOT NULL
        THEN restored_site.site_name
      ELSE NULL
     END edited_cancelled_restored_site_name,
     CASE
      WHEN biobank_order.order_status = {amended} THEN biobank_order.amended_time
      WHEN biobank_order.order_status = {cancelled} THEN biobank_order.cancelled_time
      WHEN biobank_order.order_status = {unset} AND biobank_order.restored_time IS NOT NULL
        THEN biobank_order.restored_time
      ELSE NULL
     END edited_cancelled_restored_site_time,
     CASE
      WHEN biobank_order.order_status = {amended} OR biobank_order.order_status = {cancelled} OR
        (biobank_order.order_status = {unset} AND biobank_order.restored_time IS NOT NULL)
        THEN biobank_order.amended_reason
      ELSE NULL
     END edited_cancelled_restored_site_reason
    """.format(
        amended=int(BiobankOrderStatus.AMENDED),
        cancelled=int(BiobankOrderStatus.CANCELLED),
        unset=int(BiobankOrderStatus.UNSET),
    )
    return result
# Used in the context of queries where "participant" is the table for the participant being
# selected.
_NATIVE_AMERICAN_SQL = """
(SELECT (CASE WHEN count(*) > 0 THEN 'Y' ELSE 'N' END)
FROM questionnaire_response qr
INNER JOIN questionnaire_response_answer qra
ON qra.questionnaire_response_id = qr.questionnaire_response_id
INNER JOIN questionnaire_question qq
ON qra.question_id = qq.questionnaire_question_id
WHERE qr.participant_id = participant.participant_id
AND qq.code_id = :race_question_code_id
AND qra.value_code_id = :native_american_race_code_id
AND qra.end_time IS NULL) is_native_american"""
def _participant_answer_subquery(question_code_value):
    """Subquery of (participant_id, answer code value) for the participant's active
    answer (end_time IS NULL) to the given question code. Correlates with the outer
    query's Participant."""
    question_code_alias = aliased(Code)
    answer_code_alias = aliased(Code)
    query = Query([QuestionnaireResponse.participantId, answer_code_alias.value])
    query = query.select_from(QuestionnaireResponse)
    query = query.join(QuestionnaireResponseAnswer)
    query = query.join(QuestionnaireQuestion)
    query = query.join(question_code_alias, question_code_alias.codeId == QuestionnaireQuestion.codeId)
    query = query.join(answer_code_alias, answer_code_alias.codeId == QuestionnaireResponseAnswer.valueCodeId)
    query = query.filter(
        QuestionnaireResponse.participantId == Participant.participantId,
        question_code_alias.value == question_code_value,
        QuestionnaireResponseAnswer.endTime.is_(None)
    )
    return query.subquery()
def _participant_has_answer(question_code_value, answer_value):
    """EXISTS clause: the outer query's Participant has an active answer with the
    given value for the given question code."""
    question_code_alias = aliased(Code)
    answer_code_alias = aliased(Code)
    query = Query([QuestionnaireResponse])
    query = query.join(QuestionnaireResponseAnswer)
    query = query.join(QuestionnaireQuestion)
    query = query.join(question_code_alias, question_code_alias.codeId == QuestionnaireQuestion.codeId)
    query = query.join(answer_code_alias, answer_code_alias.codeId == QuestionnaireResponseAnswer.valueCodeId)
    query = query.filter(
        QuestionnaireResponse.participantId == Participant.participantId,  # Expected from outer query
        question_code_alias.value == question_code_value,
        answer_code_alias.value == answer_value,
        QuestionnaireResponseAnswer.endTime.is_(None)
    )
    return query.exists()
# Joins orders and samples, and computes some derived values (elapsed_hours, counts).
# MySQL does not support FULL OUTER JOIN, so instead we UNION ALL a LEFT OUTER JOIN
# with a SELECT... WHERE NOT EXISTS (the latter for cases where we have a sample but no matching
# ordered sample.)
# Column order should match _*_INDEX constants above.
# Biobank ID formatting must match to_client_biobank_id.
_RECONCILIATION_REPORT_SELECTS_SQL = """
SELECT
CONCAT(:biobank_id_prefix, raw_biobank_id) biobank_id,
order_test sent_test,
SUM(finalized is not NULL) sent_count,
biobank_order_id sent_order_id,
ISODATE[MAX(collected)] sent_collection_time,
ISODATE[MAX(processed)] sent_processed_time,
ISODATE[MAX(finalized)] sent_finalized_time,
GROUP_CONCAT(DISTINCT source_site_name) source_site_name,
GROUP_CONCAT(DISTINCT source_site_mayolink_client_number) source_site_mayolink_client_number,
GROUP_CONCAT(DISTINCT source_site_hpo) source_site_hpo,
GROUP_CONCAT(DISTINCT source_site_hpo_type) source_site_hpo_type,
GROUP_CONCAT(DISTINCT finalized_site_name) finalized_site_name,
GROUP_CONCAT(DISTINCT finalized_site_mayolink_client_number)
finalized_site_mayolink_client_number,
GROUP_CONCAT(DISTINCT finalized_site_hpo) finalized_site_hpo,
GROUP_CONCAT(DISTINCT finalized_site_hpo_type) finalized_site_hpo_type,
GROUP_CONCAT(DISTINCT finalized_username) finalized_username,
test received_test,
COUNT(DISTINCT biobank_stored_sample_id) received_count,
GROUP_CONCAT(DISTINCT biobank_stored_sample_id) received_sample_id,
ISODATE[MAX(confirmed)] received_time,
ISODATE[MAX(created)] 'Sample Family Create Date',
TIMESTAMPDIFF(HOUR, MAX(collected), MAX(created)) elapsed_hours,
GROUP_CONCAT(DISTINCT biospecimen_kit_id) biospecimen_kit_id,
GROUP_CONCAT(DISTINCT fedex_tracking_number) fedex_tracking_number,
GROUP_CONCAT(DISTINCT is_native_american) is_native_american,
GROUP_CONCAT(notes_collected) notes_collected,
GROUP_CONCAT(notes_processed) notes_processed,
GROUP_CONCAT(notes_finalized) notes_finalized,
GROUP_CONCAT(edited_cancelled_restored_status_flag) edited_cancelled_restored_status_flag,
GROUP_CONCAT(edited_cancelled_restored_name) edited_cancelled_restored_name,
GROUP_CONCAT(edited_cancelled_restored_site_name) edited_cancelled_restored_site_name,
GROUP_CONCAT(edited_cancelled_restored_site_time) edited_cancelled_restored_site_time,
GROUP_CONCAT(edited_cancelled_restored_site_reason) edited_cancelled_restored_site_reason,
GROUP_CONCAT(DISTINCT order_origin) biobank_order_origin,
participant_origin
"""
_RECONCILIATION_REPORT_SOURCE_SQL = (
"""
FROM
(SELECT
participant.biobank_id raw_biobank_id,
CASE
WHEN biobank_order_identifier.system = :ce_quest_system THEN kit_id_identifier.value
ELSE biobank_order_identifier.value
END biobank_order_id,
source_site.site_name source_site_name,
source_site.mayolink_client_number source_site_mayolink_client_number,
source_site_hpo.name source_site_hpo,
"""
+ _get_hpo_type_sql("source_site_hpo")
+ """ source_site_hpo_type,
finalized_site.site_name finalized_site_name,
finalized_site.mayolink_client_number finalized_site_mayolink_client_number,
finalized_site_hpo.name finalized_site_hpo,
"""
+ _get_hpo_type_sql("finalized_site_hpo")
+ """ finalized_site_hpo_type,
biobank_order.finalized_username finalized_username,
biobank_ordered_sample.test order_test,
biobank_ordered_sample.collected,
biobank_ordered_sample.processed,
biobank_ordered_sample.finalized,
biobank_stored_sample.biobank_stored_sample_id,
biobank_stored_sample.test,
biobank_stored_sample.confirmed,
biobank_stored_sample.created,
kit_id_identifier.value biospecimen_kit_id,
tracking_number_identifier.value fedex_tracking_number, """
+ _NATIVE_AMERICAN_SQL
+ """,
biobank_order.collected_note notes_collected,
biobank_order.processed_note notes_processed,
biobank_order.finalized_note notes_finalized,
biobank_order.order_origin,
"""
+ _get_status_flag_sql()
+ """,
participant.participant_origin,
case when collected_site.site_id is not null then (case when collected_site.state = 'NY' then 'Y' else 'N' end)
when mko_state_code.code_id is not null then
(case when mko_state_code.value like 'state_ny' then 'Y' else 'N' end)
else 'NA'
end ny_flag,
case when sex_code.value like 'sexatbirth_male' then 'M'
when sex_code.value like 'sexatbirth_female' then 'F'
else 'NA'
end sex_at_birth_flag
FROM """
+ _ORDER_JOINS
+ """
LEFT OUTER JOIN
participant_summary
ON participant_summary.participant_id = participant.participant_id
LEFT OUTER JOIN
code sex_code
ON participant_summary.sex_id = sex_code.code_id
LEFT OUTER JOIN
biobank_mail_kit_order dv_order
ON dv_order.biobank_order_id = biobank_order.biobank_order_id
AND dv_order.is_test_sample IS NOT TRUE
AND dv_order.associated_hpo_id IS NULL
LEFT OUTER JOIN
code mko_state_code
ON mko_state_code.code_id = dv_order.state_id
LEFT OUTER JOIN
biobank_order_identifier kit_id_identifier
ON biobank_order.biobank_order_id = kit_id_identifier.biobank_order_id
AND kit_id_identifier.system = :kit_id_system
LEFT OUTER JOIN
biobank_stored_sample
ON """
+ _STORED_SAMPLE_JOIN_CRITERIA
+ """
LEFT OUTER JOIN
biobank_order_identifier tracking_number_identifier
ON biobank_order.biobank_order_id = tracking_number_identifier.biobank_order_id
AND tracking_number_identifier.system = :tracking_number_system
WHERE
participant.withdrawal_time IS NULL
AND NOT EXISTS (
SELECT 0 FROM participant
WHERE participant.participant_id = dv_order.participant_id
)
AND
(
(confirmed IS NOT NULL AND confirmed >= :n_days_ago) OR (collected IS NOT NULL AND collected >= :n_days_ago)
)
UNION ALL
SELECT
biobank_stored_sample.biobank_id raw_biobank_id,
biobank_stored_sample.biobank_order_identifier,
NULL source_site_name,
NULL source_site_mayolink_client_number,
NULL source_site_hpo,
NULL source_site_hpo_type,
NULL finalized_site_name,
NULL finalized_site_mayolink_client_number,
NULL finalized_site_hpo,
NULL finalized_site_hpo_type,
NULL finalized_username,
NULL order_test,
NULL collected,
NULL processed,
NULL finalized,
biobank_stored_sample.biobank_stored_sample_id,
biobank_stored_sample.test,
biobank_stored_sample.confirmed,
biobank_stored_sample.created,
NULL biospecimen_kit_id,
NULL fedex_tracking_number, """
+ _NATIVE_AMERICAN_SQL
+ """,
NULL notes_collected,
NULL notes_processed,
NULL notes_finalized,
NULL edited_cancelled_restored_status_flag,
NULL edited_cancelled_restored_name,
NULL edited_cancelled_restored_site_name,
NULL edited_cancelled_restored_site_time,
NULL edited_cancelled_restored_site_reason,
NULL order_origin,
participant.participant_origin,
'NA' ny_flag,
case when sex_code.value like 'sexatbirth_male' then 'M'
when sex_code.value like 'sexatbirth_female' then 'F'
else 'NA'
end sex_at_birth_flag
FROM
biobank_stored_sample
LEFT OUTER JOIN
participant ON biobank_stored_sample.biobank_id = participant.biobank_id
LEFT OUTER JOIN
participant_summary ON participant_summary.participant_id = participant.participant_id
LEFT OUTER JOIN
code sex_code ON participant_summary.sex_id = sex_code.code_id
WHERE biobank_stored_sample.confirmed IS NOT NULL AND NOT EXISTS (
SELECT 0 FROM """
+ _ORDER_JOINS + """
LEFT OUTER JOIN
biobank_order_identifier kit_id_identifier
ON biobank_order.biobank_order_id = kit_id_identifier.biobank_order_id
AND kit_id_identifier.system = :kit_id_system
WHERE
"""
+ _STORED_SAMPLE_JOIN_CRITERIA
+ """
) AND NOT EXISTS (
SELECT 0 FROM participant
WHERE participant.biobank_id = biobank_stored_sample.biobank_id
AND participant.withdrawal_time IS NOT NULL)
AND
(
CASE
WHEN 1 = :dv_order_filter THEN
biobank_stored_sample.biobank_id NOT IN (
SELECT p.biobank_id
FROM participant p
JOIN biobank_mail_kit_order dv
ON p.participant_id = dv.participant_id
AND dv.is_test_sample IS NOT TRUE
AND dv.associated_hpo_id IS NULL)
ELSE TRUE
END
)
AND confirmed IS NOT NULL AND confirmed >= :n_days_ago
) reconciled
GROUP BY
biobank_id, participant_origin, sent_order_id, order_test, test
ORDER BY
ISODATE[MAX(collected)], ISODATE[MAX(confirmed)], GROUP_CONCAT(DISTINCT biobank_order_id),
GROUP_CONCAT(DISTINCT biobank_stored_sample_id)
"""
)
_SALIVARY_MISSING_REPORT_SQL = (
"""
SELECT DISTINCT
CONCAT(:biobank_id_prefix, p.biobank_id) AS biobank_id
, dvo.tracking_id AS usps_tracking_id
, dvo.biobank_order_id AS order_id
, bo.created AS collection_date
, p.participant_origin
FROM
biobank_mail_kit_order dvo
JOIN participant p ON p.participant_id = dvo.participant_id
JOIN biobank_order bo ON bo.biobank_order_id = dvo.biobank_order_id
JOIN biobank_ordered_sample bos ON bos.order_id = bo.biobank_order_id
JOIN biobank_order_identifier boi ON boi.biobank_order_id = bo.biobank_order_id
LEFT JOIN biobank_stored_sample bss ON bss.biobank_id = p.biobank_id
WHERE TRUE
AND (
bo.created < DATE_SUB(now(), INTERVAL :n_days_interval DAY)
)
AND bss.biobank_stored_sample_id IS NULL
AND dvo.is_test_sample IS NOT TRUE
AND dvo.associated_hpo_id IS NULL
"""
)
def in_past_n_days(result, now, n_days, ordered_before=None):
    """Return True when the row's latest timestamp is within *n_days* of *now*.

    The latest of the sent-collection and received timestamps (read from the
    ``result`` row) is compared against *now*. When *ordered_before* is given,
    rows whose collection time falls after that cutoff are rejected outright.
    """
    collected_str = result[_SENT_COLLECTION_TIME_INDEX]
    received_str = result[_RECEIVED_TIME_INDEX]

    latest = None
    if collected_str:
        latest = parse_datetime(collected_str)
        if ordered_before and latest > ordered_before:
            return False
    if received_str:
        received = parse_datetime(received_str)
        # A falsy parse result replaces any collection time already held in
        # ``latest`` (matches the original behaviour).
        latest = max(received, latest) if (received and latest) else received

    if latest:
        return (now - latest).days <= n_days
    return False
# ---- end of record (license: bsd-3-clause) ----
# ==== file: localflavor/se/utils.py (repo: django/django-localflavor) ====
import datetime
def id_number_checksum(gd):
    """Calculate a Swedish ID number checksum, using the "Luhn" algorithm.

    ``gd`` is a regex match groupdict with string values for ``year``,
    ``month``, ``day`` and ``serial``. Digits at even positions are doubled
    (weights 2, 1, 2, 1, ...) and two-digit products are reduced to their
    digit sum before being accumulated.
    """
    total = 0
    for position, char in enumerate(gd['year'] + gd['month'] + gd['day'] + gd['serial']):
        # When validating interim ID numbers, the letter is considered
        # equivalent to 1. Source:
        # https://wiki.swami.se/display/Inkubator/norEduPersonNIN+och+Svenska+Personnummer
        value = 1 if char.isalpha() else int(char)
        product = value * (2 if position % 2 == 0 else 1)
        if product > 9:
            # Digit sum of a two-digit product (max is 18 -> 1 + 8).
            product = sum(divmod(product, 10))
        total += product
    if total % 10 == 0:
        return 0
    # Distance up to the next multiple of ten.
    return ((total // 10) + 1) * 10 - total
def validate_id_birthday(gd, fix_coordination_number_day=True):
"""
Validates the birth_day and returns the datetime.date object for the birth_day.
If the date is an invalid birth day, a ValueError will be raised.
"""
today = datetime.date.today()
day = int(gd['day'])
if fix_coordination_number_day and day > 60:
day -= 60
if gd['century'] is None:
# The century was not specified, and need to be calculated from todays date
year = int(today.strftime('%Y')) - int(today.strftime('%y')) + int(gd['year'])
if ('%s%s%02d' % (gd['year'], gd['month'], day)) > today.strftime('%y%m%d'):
year -= 100
# If the person is older than 100 years
if gd['sign'] == '+':
year -= 100
else:
year = int(gd['century'] + gd['year'])
# Make sure the year is valid
# There are no swedish personal identity numbers where year < 1800
if year < 1800:
raise ValueError
# ValueError will be raise for invalid dates
birth_day = datetime.date(year, int(gd['month']), day)
# birth_day must not be in the future
if birth_day > today:
raise ValueError
return birth_day
def format_personal_id_number(birth_day, gd):
    """Return the 12-character personal identity number as a string."""
    # str(birth_day.year) is used because birth_day.strftime does not support
    # dates earlier than 1900.
    # Interim ID numbers carry a letter in the serial part; it is normalised
    # to upper case. Source:
    # https://wiki.swami.se/display/Inkubator/norEduPersonNIN+och+Svenska+Personnummer
    parts = (str(birth_day.year), gd['month'], gd['day'], gd['serial'].upper(), gd['checksum'])
    return ''.join(parts)
def format_organisation_number(gd):
    """Return the organisation number as a string; the century may be absent."""
    century = '' if gd['century'] is None else gd['century']
    return ''.join((century, gd['year'], gd['month'], gd['day'], gd['serial'], gd['checksum']))
def valid_organisation(gd):
    """Return True when the match groups describe a Swedish organisation number.

    Organisation numbers either omit the century or use the fixed '16' prefix,
    always carry a month value of 20 or above, never use the '+' separator,
    and have a first year digit that identifies the organisation group.
    """
    # Bug fix: the regex groupdict yields *strings*, so the century must be
    # compared with '16', not the integer 16 (which could never match and
    # rejected every 12-digit organisation number).
    return gd['century'] in (None, '16') and \
        int(gd['month']) >= 20 and \
        gd['sign'] in (None, '-') and \
        gd['year'][0] in ('2', '5', '7', '8', '9')  # group identifier
# ---- end of record (license: bsd-3-clause) ----
# ==== file: rdr_service/lib_fhir/fhirclient_4_0_0/models/healthcareservice_tests.py (repo: all-of-us/raw-data-repository) ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import healthcareservice
from .fhirdate import FHIRDate
class HealthcareServiceTests(unittest.TestCase):
    """Round-trip tests for the generated HealthcareService FHIR model.

    Each test loads a canonical example resource from JSON, asserts the parsed
    field values, serialises the instance back with ``as_json()``, re-parses
    it, and asserts the same values again to prove the round trip is lossless.
    """

    def instantiate_from(self, filename):
        # FHIR_UNITTEST_DATADIR points at the directory of example resources.
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("HealthcareService", js["resourceType"])
        return healthcareservice.HealthcareService(js)

    def testHealthcareService1(self):
        inst = self.instantiate_from("healthcareservice-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a HealthcareService instance")
        self.implHealthcareService1(inst)

        # Serialise and re-parse to verify the JSON round trip.
        js = inst.as_json()
        self.assertEqual("HealthcareService", js["resourceType"])
        inst2 = healthcareservice.HealthcareService(js)
        self.implHealthcareService1(inst2)

    def implHealthcareService1(self, inst):
        # Field-by-field assertions against healthcareservice-example.json.
        self.assertTrue(inst.active)
        self.assertFalse(inst.appointmentRequired)
        self.assertEqual(inst.availabilityExceptions, "Reduced capacity is available during the Christmas period")
        self.assertTrue(inst.availableTime[0].allDay)
        self.assertEqual(inst.availableTime[0].daysOfWeek[0], "wed")
        self.assertEqual(inst.availableTime[1].availableEndTime.date, FHIRDate("05:30:00").date)
        self.assertEqual(inst.availableTime[1].availableEndTime.as_json(), "05:30:00")
        self.assertEqual(inst.availableTime[1].availableStartTime.date, FHIRDate("08:30:00").date)
        self.assertEqual(inst.availableTime[1].availableStartTime.as_json(), "08:30:00")
        self.assertEqual(inst.availableTime[1].daysOfWeek[0], "mon")
        self.assertEqual(inst.availableTime[1].daysOfWeek[1], "tue")
        self.assertEqual(inst.availableTime[1].daysOfWeek[2], "thu")
        self.assertEqual(inst.availableTime[1].daysOfWeek[3], "fri")
        self.assertEqual(inst.availableTime[2].availableEndTime.date, FHIRDate("04:30:00").date)
        self.assertEqual(inst.availableTime[2].availableEndTime.as_json(), "04:30:00")
        self.assertEqual(inst.availableTime[2].availableStartTime.date, FHIRDate("09:30:00").date)
        self.assertEqual(inst.availableTime[2].availableStartTime.as_json(), "09:30:00")
        self.assertEqual(inst.availableTime[2].daysOfWeek[0], "sat")
        self.assertEqual(inst.availableTime[2].daysOfWeek[1], "fri")
        self.assertEqual(inst.category[0].coding[0].code, "8")
        self.assertEqual(inst.category[0].coding[0].display, "Counselling")
        self.assertEqual(inst.category[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/service-category")
        self.assertEqual(inst.category[0].text, "Counselling")
        self.assertEqual(inst.characteristic[0].coding[0].display, "Wheelchair access")
        self.assertEqual(inst.comment, "Providing Specialist psychology services to the greater Den Burg area, many years of experience dealing with PTSD issues")
        self.assertEqual(inst.contained[0].id, "DenBurg")
        self.assertEqual(inst.eligibility[0].code.coding[0].display, "DVA Required")
        self.assertEqual(inst.eligibility[0].comment, "Evidence of application for DVA status may be sufficient for commencing assessment")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.identifier[0].system, "http://example.org/shared-ids")
        self.assertEqual(inst.identifier[0].value, "HS-12")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.name, "Consulting psychologists and/or psychology services")
        self.assertEqual(inst.notAvailable[0].description, "Christmas/Boxing Day")
        self.assertEqual(inst.notAvailable[0].during.end.date, FHIRDate("2015-12-26").date)
        self.assertEqual(inst.notAvailable[0].during.end.as_json(), "2015-12-26")
        self.assertEqual(inst.notAvailable[0].during.start.date, FHIRDate("2015-12-25").date)
        self.assertEqual(inst.notAvailable[0].during.start.as_json(), "2015-12-25")
        self.assertEqual(inst.notAvailable[1].description, "New Years Day")
        self.assertEqual(inst.notAvailable[1].during.end.date, FHIRDate("2016-01-01").date)
        self.assertEqual(inst.notAvailable[1].during.end.as_json(), "2016-01-01")
        self.assertEqual(inst.notAvailable[1].during.start.date, FHIRDate("2016-01-01").date)
        self.assertEqual(inst.notAvailable[1].during.start.as_json(), "2016-01-01")
        self.assertEqual(inst.program[0].text, "PTSD outreach")
        self.assertEqual(inst.referralMethod[0].coding[0].code, "phone")
        self.assertEqual(inst.referralMethod[0].coding[0].display, "Phone")
        self.assertEqual(inst.referralMethod[1].coding[0].code, "fax")
        self.assertEqual(inst.referralMethod[1].coding[0].display, "Fax")
        self.assertEqual(inst.referralMethod[2].coding[0].code, "elec")
        self.assertEqual(inst.referralMethod[2].coding[0].display, "Secure Messaging")
        self.assertEqual(inst.referralMethod[3].coding[0].code, "semail")
        self.assertEqual(inst.referralMethod[3].coding[0].display, "Secure Email")
        self.assertEqual(inst.serviceProvisionCode[0].coding[0].code, "cost")
        self.assertEqual(inst.serviceProvisionCode[0].coding[0].display, "Fees apply")
        self.assertEqual(inst.serviceProvisionCode[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/service-provision-conditions")
        self.assertEqual(inst.specialty[0].coding[0].code, "47505003")
        self.assertEqual(inst.specialty[0].coding[0].display, "Posttraumatic stress disorder")
        self.assertEqual(inst.specialty[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.telecom[0].system, "phone")
        self.assertEqual(inst.telecom[0].use, "work")
        self.assertEqual(inst.telecom[0].value, "(555) silent")
        self.assertEqual(inst.telecom[1].system, "email")
        self.assertEqual(inst.telecom[1].use, "work")
        self.assertEqual(inst.telecom[1].value, "directaddress@example.com")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "394913002")
        self.assertEqual(inst.type[0].coding[0].display, "Psychotherapy")
        self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.type[1].coding[0].code, "394587001")
        self.assertEqual(inst.type[1].coding[0].display, "Psychiatry")
        self.assertEqual(inst.type[1].coding[0].system, "http://snomed.info/sct")
# ---- end of record (license: bsd-3-clause) ----
# ==== file: rdr_service/api/participant_summary_api.py (repo: all-of-us/raw-data-repository) ====
import logging
from flask import request
from werkzeug.exceptions import BadRequest, Forbidden, InternalServerError, NotFound
from rdr_service.api.base_api import BaseApi, make_sync_results_for_request
from rdr_service.api_util import AWARDEE, DEV_MAIL, RDR_AND_PTC, PTC_HEALTHPRO_AWARDEE_CURATION
from rdr_service.app_util import auth_required, get_validated_user_info, restrict_to_gae_project
from rdr_service.dao.base_dao import _MIN_ID, _MAX_ID
from rdr_service.dao.hpro_consent_dao import HealthProConsentDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_incentives_dao import ParticipantIncentivesDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.dao.site_dao import SiteDao
from rdr_service.model.hpo import HPO
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.config import getSettingList, HPO_LITE_AWARDEE
from rdr_service.code_constants import UNSET
from rdr_service.participant_enums import ParticipantSummaryRecord
# GAE project IDs in which the PTC-facing POST endpoint may run; enforced by
# @restrict_to_gae_project on ParticipantSummaryApi.post.
PTC_ALLOWED_ENVIRONMENTS = [
    'all-of-us-rdr-sandbox',
    'all-of-us-rdr-stable',
    'all-of-us-rdr-ptsc-1-test',
    'localhost'
]
class ParticipantSummaryApi(BaseApi):
    """API handler for reading (GET) and creating (POST) participant summaries.

    Awardee-role callers are restricted to their own awardee's data; callers
    with a site attached are further restricted to that site; HealthPro
    callers get HealthPro consent files and participant incentives attached
    to the response.
    """

    def __init__(self):
        super(ParticipantSummaryApi, self).__init__(ParticipantSummaryDao(), get_returns_children=True)
        self.user_info = None  # validated caller info, set per request in get()
        self.query = None  # query built by _make_query()
        self.site_dao = None  # created lazily in _filter_by_user_site()
        self.participant_dao = ParticipantDao()
        self.hpro_consent_dao = HealthProConsentDao()
        self.incentives_dao = ParticipantIncentivesDao()

    @auth_required(PTC_HEALTHPRO_AWARDEE_CURATION)
    def get(self, p_id=None):
        """Return one participant summary (when p_id is given) or a filtered listing."""
        # Make sure participant id is in the correct range of possible values.
        if isinstance(p_id, int) and not _MIN_ID <= p_id <= _MAX_ID:
            raise NotFound(f"Participant with ID {p_id} is not found.")
        auth_awardee = None
        user_email, user_info = get_validated_user_info()
        self.user_info = user_info
        if AWARDEE in user_info["roles"]:
            # if `user_email == DEV_MAIL and user_info.get("awardee") is not None` is True,
            # that means the value of `awardee` is mocked in the test cases, we need to read it from user_info
            if user_email == DEV_MAIL and user_info.get("awardee") is None:
                auth_awardee = request.args.get("awardee")
            else:
                try:
                    auth_awardee = user_info["awardee"]
                except KeyError:
                    raise InternalServerError("Config error for awardee")
        # data only for user_awardee, assert that query has same awardee
        if p_id is not None:
            # Awardee-role callers may not fetch individual participants.
            if auth_awardee and user_email != DEV_MAIL:
                raise Forbidden
            self._filter_by_user_site(participant_id=p_id)
            # HealthPro callers additionally get consent files and incentives.
            if any(role in ['healthpro'] for role in self.user_info.get('roles')):
                self._fetch_hpro_consents(pids=p_id)
                self._fetch_participant_incentives(pids=p_id)
            return super(ParticipantSummaryApi, self).get(p_id)
        else:
            if auth_awardee:
                # make sure request has awardee
                requested_awardee = request.args.get("awardee")
                hpo_lite_awardees = getSettingList(HPO_LITE_AWARDEE, default=[])
                if requested_awardee == UNSET and auth_awardee in hpo_lite_awardees:
                    # allow hpo lite awardee to access UNSET participants
                    pass
                elif requested_awardee != auth_awardee:
                    raise Forbidden
            return self._query("participantId")

    @auth_required(RDR_AND_PTC)
    @restrict_to_gae_project(PTC_ALLOWED_ENVIRONMENTS)
    def post(self, p_id):
        """Create the summary for an existing participant; re-creation is rejected."""
        participant = self.participant_dao.get(p_id)
        if not participant:
            raise NotFound(f"Participant P{p_id} was not found")
        participant_summary = self.dao.get_by_participant_id(p_id)
        if participant_summary:
            raise BadRequest(f"Participant Summary for P{p_id} already exists, updates are not allowed.")
        return super(ParticipantSummaryApi, self).post(p_id)

    def _make_query(self, check_invalid=True):
        """Build the listing query, applying sync flags and the caller's site filter."""
        constraint_failed, message = self._check_constraints()
        if constraint_failed:
            raise BadRequest(f"{message}")
        self.query = super(ParticipantSummaryApi, self)._make_query(check_invalid)
        # _sync requests always return a continuation token; _backfill defaults on.
        self.query.always_return_token = self._get_request_arg_bool("_sync")
        self.query.backfill_sync = self._get_request_arg_bool("_backfill", True)
        self._filter_by_user_site()
        return self.query

    def _make_bundle(self, results, id_field, participant_id):
        # Sync requests use the lighter sync-result format instead of a bundle.
        if self._get_request_arg_bool("_sync"):
            return make_sync_results_for_request(self.dao, results)
        return super(ParticipantSummaryApi, self)._make_bundle(results, id_field, participant_id)

    def _check_constraints(self):
        """Return (invalid, message) for interdependent query arguments.

        HealthPro callers must pass lastName and dateOfBirth together, unless
        an hpoId argument is also present (the bypass).
        """
        message = None
        invalid = False
        valid_roles = ['healthpro']
        if not any(role in valid_roles for role in self.user_info.get('roles')):
            return invalid, message
        pair_config = {
            'lastName': {
                'fields': ['lastName', 'dateOfBirth'],
                'bypass_check_args': ['hpoId']
            },
            'dateOfBirth': {
                'fields': ['lastName', 'dateOfBirth'],
                'bypass_check_args': ['hpoId']
            }
        }
        for arg in request.args:
            if arg in pair_config.keys():
                constraint = pair_config[arg]
                bypass = [val for val in constraint['bypass_check_args'] if val in request.args]
                missing = [val for val in constraint['fields'] if val not in request.args]
                if not bypass and missing:
                    invalid = True
                    message = f'Argument {missing[0]} is required with {arg}'
                    break
        return invalid, message

    def _query(self, id_field, participant_id=None):
        """Run the listing query and bundle the results."""
        logging.info(f"Preparing query for {self.dao.model_type}.")
        query = self._make_query()
        results = self.dao.query(query)
        participant_ids = [obj.participantId for obj in results.items if hasattr(obj, 'participantId')]
        # Attach HealthPro-only extras before bundling.
        if any(role in ['healthpro'] for role in self.user_info.get('roles')) and participant_ids:
            self._fetch_hpro_consents(participant_ids)
            self._fetch_participant_incentives(participant_ids)
        logging.info("Query complete, bundling results.")
        response = self._make_bundle(results, id_field, participant_id)
        logging.info("Returning response.")
        return response

    def _fetch_hpro_consents(self, pids):
        # pids may be a single participant id or a list of them.
        if type(pids) is not list:
            self.dao.hpro_consents = self.hpro_consent_dao.get_by_participant(pids)
        else:
            self.dao.hpro_consents = self.hpro_consent_dao.batch_get_by_participant(pids)

    def _fetch_participant_incentives(self, pids):
        # Stored on the DAO so the serializer can pick them up when bundling.
        self.dao.participant_incentives = self.incentives_dao.get_by_participant(pids)

    def _filter_by_user_site(self, participant_id=None):
        """Constrain access to the site attached to the requesting user, if any."""
        if not self.user_info.get('site'):
            return
        user_site = self.user_info.get('site')
        if type(user_site) is list:
            user_site = user_site[0]
        self.site_dao = SiteDao()
        site_obj = self.site_dao.get_by_google_group(user_site)
        if not site_obj:
            raise BadRequest(f"No site found with google group {user_site}, that is attached to request user")
        if not participant_id:
            # Listing request: force the caller's site filter, replacing any
            # siteId filter supplied in the query string.
            user_info_site_filter = self.dao.make_query_filter('site', user_site)
            if user_info_site_filter:
                current_site_filter = list(filter(lambda x: x.field_name == 'siteId', self.query.field_filters))
                if current_site_filter:
                    self.query.field_filters.remove(current_site_filter[0])
                self.query.field_filters.append(user_info_site_filter)
            return
        # Single-participant request: the participant must be unpaired or
        # paired to the caller's own site.
        participant_summary = self.dao.get_by_participant_id(participant_id)
        if not participant_summary:
            return
        if participant_summary.siteId and \
                participant_summary.siteId != site_obj.siteId:
            raise Forbidden(f"Site attached to the request user, "
                            f"{user_site} is forbidden from accessing this participant")
        return
class ParticipantSummaryModifiedApi(BaseApi):
    """
    API to return participant_id and last_modified fields
    """

    def __init__(self):
        super(ParticipantSummaryModifiedApi, self).__init__(ParticipantSummaryDao())

    @auth_required(PTC_HEALTHPRO_AWARDEE_CURATION)
    def get(self):
        """
        Return participant_id and last_modified for all records or a subset based
        on the awardee parameter.
        """
        response = list()
        user_email, user_info = get_validated_user_info()
        request_awardee = None
        with self.dao.session() as session:
            # validate parameter when passed an awardee.
            if "awardee" in request.args:
                request_awardee = request.args.get("awardee")
                hpo = session.query(HPO.hpoId).filter(HPO.name == request_awardee).first()
                if not hpo:
                    raise BadRequest("invalid awardee")
            # verify user has access to the requested awardee.
            if AWARDEE in user_info["roles"] and user_email != DEV_MAIL:
                try:
                    if not request_awardee or user_info["awardee"] != request_awardee:
                        raise Forbidden
                except KeyError:
                    raise InternalServerError("config error for awardee")
            query = session.query(ParticipantSummary.participantId, ParticipantSummary.lastModified)
            query = query.order_by(ParticipantSummary.participantId)
            if request_awardee:
                query = query.filter(ParticipantSummary.hpoId == hpo.hpoId)
            items = query.all()
            # Serialise each row to the lightweight id/timestamp payload.
            for item in items:
                response.append(
                    {
                        "participantId": "P{0}".format(item.participantId),
                        "lastModified": item.lastModified.isoformat()
                    }
                )
        return response
class ParticipantSummaryCheckLoginApi(BaseApi):
    """
    API to return status if data is found / not found on participant summary
    """

    def __init__(self):
        super(ParticipantSummaryCheckLoginApi, self).__init__(ParticipantSummaryDao())

    @auth_required(RDR_AND_PTC)
    def post(self):
        """
        Return status of IN_USE / NOT_IN_USE if participant found / not found
        """
        req_data = request.get_json()
        # Maps accepted request keys to participant summary attribute names.
        accepted_map = {
            'email': 'email',
            'login_phone_number': 'loginPhoneNumber'
        }
        if req_data:
            if len(req_data.keys() - accepted_map.keys()):
                raise BadRequest("Only email or login_phone_number are allowed in request")
            # NOTE(review): the all() below filters out None values before
            # evaluating, so a payload whose values are all None still passes
            # this check (all([]) is True) — confirm whether None values
            # should instead be rejected.
            if any([key in req_data for key in accepted_map]) \
                    and all([val for val in req_data.values() if val is not None]):
                status = ParticipantSummaryRecord.NOT_IN_USE
                # IN_USE as soon as any supplied attribute matches a record.
                for key, value in req_data.items():
                    found_result = self.dao.get_record_from_attr(
                        attr=accepted_map[key],
                        value=value
                    )
                    if found_result:
                        status = ParticipantSummaryRecord.IN_USE
                        break
                return {'status': status.name}
        raise BadRequest("Missing email or login_phone_number in request")
# ---- end of record (license: bsd-3-clause) ----
# ==== file: rdr_service/lib_fhir/fhirclient_1_0_6/models/episodeofcare_tests.py (repo: all-of-us/raw-data-repository) ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import episodeofcare
from .fhirdate import FHIRDate
class EpisodeOfCareTests(unittest.TestCase):
    """Round-trip tests for the generated EpisodeOfCare FHIR model.

    Loads the canonical example resource, asserts its parsed fields, then
    serialises and re-parses it to prove the JSON round trip is lossless.
    """

    def instantiate_from(self, filename):
        # FHIR_UNITTEST_DATADIR points at the directory of example resources.
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("EpisodeOfCare", js["resourceType"])
        return episodeofcare.EpisodeOfCare(js)

    def testEpisodeOfCare1(self):
        inst = self.instantiate_from("episodeofcare-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a EpisodeOfCare instance")
        self.implEpisodeOfCare1(inst)

        # Serialise and re-parse to verify the round trip.
        js = inst.as_json()
        self.assertEqual("EpisodeOfCare", js["resourceType"])
        inst2 = episodeofcare.EpisodeOfCare(js)
        self.implEpisodeOfCare1(inst2)

    def implEpisodeOfCare1(self, inst):
        # Field-by-field assertions against episodeofcare-example.json.
        self.assertEqual(inst.careTeam[0].period.end.date, FHIRDate("2014-09-16").date)
        self.assertEqual(inst.careTeam[0].period.end.as_json(), "2014-09-16")
        self.assertEqual(inst.careTeam[0].period.start.date, FHIRDate("2014-09-01").date)
        self.assertEqual(inst.careTeam[0].period.start.as_json(), "2014-09-01")
        self.assertEqual(inst.careTeam[0].role[0].coding[0].code, "AO")
        self.assertEqual(inst.careTeam[0].role[0].coding[0].display, "Assessment Worker")
        self.assertEqual(inst.careTeam[0].role[0].coding[0].system, "http://example.org/EpisodeOfCare/Role")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.identifier[0].system, "http://example.org/sampleepisodeofcare-identifier")
        self.assertEqual(inst.identifier[0].value, "123")
        self.assertEqual(inst.period.start.date, FHIRDate("2014-09-01").date)
        self.assertEqual(inst.period.start.as_json(), "2014-09-01")
        self.assertEqual(inst.status, "active")
        self.assertEqual(inst.statusHistory[0].period.end.date, FHIRDate("2014-09-14").date)
        self.assertEqual(inst.statusHistory[0].period.end.as_json(), "2014-09-14")
        self.assertEqual(inst.statusHistory[0].period.start.date, FHIRDate("2014-09-01").date)
        self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2014-09-01")
        self.assertEqual(inst.statusHistory[0].status, "planned")
        self.assertEqual(inst.statusHistory[1].period.end.date, FHIRDate("2014-09-21").date)
        self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2014-09-21")
        self.assertEqual(inst.statusHistory[1].period.start.date, FHIRDate("2014-09-15").date)
        self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2014-09-15")
        self.assertEqual(inst.statusHistory[1].status, "active")
        self.assertEqual(inst.statusHistory[2].period.end.date, FHIRDate("2014-09-24").date)
        self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2014-09-24")
        self.assertEqual(inst.statusHistory[2].period.start.date, FHIRDate("2014-09-22").date)
        self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2014-09-22")
        self.assertEqual(inst.statusHistory[2].status, "onhold")
        self.assertEqual(inst.statusHistory[3].period.start.date, FHIRDate("2014-09-25").date)
        self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2014-09-25")
        self.assertEqual(inst.statusHistory[3].status, "active")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "HACC")
        self.assertEqual(inst.type[0].coding[0].display, "Home and Community Care Package")
        self.assertEqual(inst.type[0].coding[0].system, "http://example.org/EpisodeOfCare/Type")
# ---- end of record (license: bsd-3-clause) ----
# ==== file: rdr_service/lib_fhir/fhirclient_4_0_0/models/careplan.py (repo: all-of-us/raw-data-repository) ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/CarePlan) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class CarePlan(domainresource.DomainResource):
    """ Healthcare plan for patient or group.

    Describes the intention of how one or more practitioners intend to deliver
    care for a particular patient, group or community for a period of time,
    possibly limited to care for a specific condition or set of conditions.
    """

    resource_type = "CarePlan"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.activity = None
        """ Action to occur as part of plan.
        List of `CarePlanActivity` items (represented as `dict` in JSON). """

        self.addresses = None
        """ Health issues this plan addresses.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.author = None
        """ Who is the designated responsible party.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.basedOn = None
        """ Fulfills CarePlan.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.careTeam = None
        """ Who's involved in plan?.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.category = None
        """ Type of plan.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.contributor = None
        """ Who provided the content of the care plan.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.created = None
        """ Date record was first recorded.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.description = None
        """ Summary of nature of plan.
        Type `str`. """

        self.encounter = None
        """ Encounter created as part of.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.goal = None
        """ Desired outcome of plan.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.identifier = None
        """ External Ids for this plan.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.instantiatesCanonical = None
        """ Instantiates FHIR protocol or definition.
        List of `str` items. """

        self.instantiatesUri = None
        """ Instantiates external protocol or definition.
        List of `str` items. """

        self.intent = None
        """ proposal | plan | order | option.
        Type `str`. """

        self.note = None
        """ Comments about the plan.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.partOf = None
        """ Part of referenced CarePlan.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.period = None
        """ Time period plan covers.
        Type `Period` (represented as `dict` in JSON). """

        self.replaces = None
        """ CarePlan replaced by this CarePlan.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.status = None
        """ draft | active | suspended | completed | entered-in-error |
        cancelled | unknown.
        Type `str`. """

        self.subject = None
        """ Who the care plan is for.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.supportingInfo = None
        """ Information considered as part of plan.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.title = None
        """ Human-friendly name for the care plan.
        Type `str`. """

        super(CarePlan, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(CarePlan, self).elementProperties()
        # Each tuple: (python name, json name, type, is_list, of_many, not_optional).
        js.extend([
            ("activity", "activity", CarePlanActivity, True, None, False),
            ("addresses", "addresses", fhirreference.FHIRReference, True, None, False),
            ("author", "author", fhirreference.FHIRReference, False, None, False),
            ("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
            ("careTeam", "careTeam", fhirreference.FHIRReference, True, None, False),
            ("category", "category", codeableconcept.CodeableConcept, True, None, False),
            ("contributor", "contributor", fhirreference.FHIRReference, True, None, False),
            ("created", "created", fhirdate.FHIRDate, False, None, False),
            ("description", "description", str, False, None, False),
            ("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
            ("goal", "goal", fhirreference.FHIRReference, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("instantiatesCanonical", "instantiatesCanonical", str, True, None, False),
            ("instantiatesUri", "instantiatesUri", str, True, None, False),
            ("intent", "intent", str, False, None, True),
            ("note", "note", annotation.Annotation, True, None, False),
            ("partOf", "partOf", fhirreference.FHIRReference, True, None, False),
            ("period", "period", period.Period, False, None, False),
            ("replaces", "replaces", fhirreference.FHIRReference, True, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
            ("supportingInfo", "supportingInfo", fhirreference.FHIRReference, True, None, False),
            ("title", "title", str, False, None, False),
        ])
        return js
from . import backboneelement
class CarePlanActivity(backboneelement.BackboneElement):
    """ Action to occur as part of plan.

    Identifies a planned action to occur as part of the plan. For example, a
    medication to be used, lab tests to perform, self-monitoring, education,
    etc.
    """

    resource_type = "CarePlanActivity"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.detail = None
        """ In-line definition of activity.
        Type `CarePlanActivityDetail` (represented as `dict` in JSON). """

        self.outcomeCodeableConcept = None
        """ Results of the activity.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.outcomeReference = None
        """ Appointment, Encounter, Procedure, etc..
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.progress = None
        """ Comments about the activity status/progress.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.reference = None
        """ Activity details defined in specific resource.
        Type `FHIRReference` (represented as `dict` in JSON). """

        super(CarePlanActivity, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(CarePlanActivity, self).elementProperties()
        # Each tuple: (python name, json name, type, is_list, of_many, not_optional).
        js.extend([
            ("detail", "detail", CarePlanActivityDetail, False, None, False),
            ("outcomeCodeableConcept", "outcomeCodeableConcept", codeableconcept.CodeableConcept, True, None, False),
            ("outcomeReference", "outcomeReference", fhirreference.FHIRReference, True, None, False),
            ("progress", "progress", annotation.Annotation, True, None, False),
            ("reference", "reference", fhirreference.FHIRReference, False, None, False),
        ])
        return js
class CarePlanActivityDetail(backboneelement.BackboneElement):
    """ In-line definition of activity.

    A simple summary of a planned activity suitable for a general care plan
    system (e.g. form driven) that doesn't know about specific resources such
    as procedure etc.
    """

    resource_type = "CarePlanActivityDetail"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ Detail type of activity.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.dailyAmount = None
        """ How to consume/day?.
        Type `Quantity` (represented as `dict` in JSON). """

        self.description = None
        """ Extra info describing activity to perform.
        Type `str`. """

        self.doNotPerform = None
        """ If true, activity is prohibiting action.
        Type `bool`. """

        self.goal = None
        """ Goals this activity relates to.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.instantiatesCanonical = None
        """ Instantiates FHIR protocol or definition.
        List of `str` items. """

        self.instantiatesUri = None
        """ Instantiates external protocol or definition.
        List of `str` items. """

        self.kind = None
        """ Kind of resource.
        Type `str`. """

        self.location = None
        """ Where it should happen.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.performer = None
        """ Who will be responsible?.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.productCodeableConcept = None
        """ What is to be administered/supplied.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.productReference = None
        """ What is to be administered/supplied.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.quantity = None
        """ How much to administer/supply/consume.
        Type `Quantity` (represented as `dict` in JSON). """

        self.reasonCode = None
        """ Why activity should be done or why activity was prohibited.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.reasonReference = None
        """ Why activity is needed.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.scheduledPeriod = None
        """ When activity is to occur.
        Type `Period` (represented as `dict` in JSON). """

        self.scheduledString = None
        """ When activity is to occur.
        Type `str`. """

        self.scheduledTiming = None
        """ When activity is to occur.
        Type `Timing` (represented as `dict` in JSON). """

        self.status = None
        """ not-started | scheduled | in-progress | on-hold | completed |
        cancelled | stopped | unknown | entered-in-error.
        Type `str`. """

        self.statusReason = None
        """ Reason for current status.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(CarePlanActivityDetail, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(CarePlanActivityDetail, self).elementProperties()
        # Each tuple: (python name, json name, type, is_list, of_many, not_optional).
        # "product" and "scheduled" are of_many groups: only one variant of
        # each choice element may be populated at a time.
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("dailyAmount", "dailyAmount", quantity.Quantity, False, None, False),
            ("description", "description", str, False, None, False),
            ("doNotPerform", "doNotPerform", bool, False, None, False),
            ("goal", "goal", fhirreference.FHIRReference, True, None, False),
            ("instantiatesCanonical", "instantiatesCanonical", str, True, None, False),
            ("instantiatesUri", "instantiatesUri", str, True, None, False),
            ("kind", "kind", str, False, None, False),
            ("location", "location", fhirreference.FHIRReference, False, None, False),
            ("performer", "performer", fhirreference.FHIRReference, True, None, False),
            ("productCodeableConcept", "productCodeableConcept", codeableconcept.CodeableConcept, False, "product", False),
            ("productReference", "productReference", fhirreference.FHIRReference, False, "product", False),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
            ("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
            ("scheduledPeriod", "scheduledPeriod", period.Period, False, "scheduled", False),
            ("scheduledString", "scheduledString", str, False, "scheduled", False),
            ("scheduledTiming", "scheduledTiming", timing.Timing, False, "scheduled", False),
            ("status", "status", str, False, None, True),
            ("statusReason", "statusReason", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
import sys
# Resolve sibling FHIR element modules used by the property tables above.
# The normal relative import is attempted first; on ImportError the
# already-loaded module object is pulled from sys.modules instead —
# presumably to tolerate circular-import situations in the generated
# package (TODO confirm against the code generator's intent).
try:
    from . import annotation
except ImportError:
    annotation = sys.modules[__package__ + '.annotation']
try:
    from . import codeableconcept
except ImportError:
    codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
    from . import fhirdate
except ImportError:
    fhirdate = sys.modules[__package__ + '.fhirdate']
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']
try:
    from . import identifier
except ImportError:
    identifier = sys.modules[__package__ + '.identifier']
try:
    from . import period
except ImportError:
    period = sys.modules[__package__ + '.period']
try:
    from . import quantity
except ImportError:
    quantity = sys.modules[__package__ + '.quantity']
try:
    from . import timing
except ImportError:
    timing = sys.modules[__package__ + '.timing']
| bsd-3-clause | 3f61df3139201c4a01ac569c440575a7 | 39.970027 | 123 | 0.601889 | 4.318208 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/repository/questionnaire_response_repository.py | 1 | 10019 | from collections import defaultdict
from datetime import datetime
from typing import Dict, List, Optional
from sqlalchemy import and_, func, or_
from sqlalchemy.orm import aliased, joinedload, Session
from rdr_service import code_constants, participant_enums as enums
from rdr_service.domain_model import response as response_domain_model
from rdr_service.model.code import Code
from rdr_service.model.participant import Participant
from rdr_service.model.questionnaire import QuestionnaireConcept, QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer
from rdr_service.services.system_utils import DateRange
class QuestionnaireResponseRepository:
    """Data-access helpers for reading questionnaire response records."""

    @classmethod
    def get_responses_to_surveys(
        cls,
        session: Session,
        survey_codes: List[str] = None,
        participant_ids: List[int] = None,
        include_ignored_answers=False,
        sent_statuses: Optional[List[enums.QuestionnaireResponseStatus]] = None,
        classification_types: Optional[List[enums.QuestionnaireResponseClassificationType]] = None,
        created_start_datetime: datetime = None,
        created_end_datetime: datetime = None
    ) -> Dict[int, response_domain_model.ParticipantResponses]:
        """
        Retrieve questionnaire response data (returned as a domain model) for the specified participant ids
        and survey codes.
        :param survey_codes: Survey module code strings to get responses for
        :param session: Session to use for connecting to the database
        :param participant_ids: Participant ids to get responses for
        :param include_ignored_answers: Include response answers that have been ignored
        :param sent_statuses: List of QuestionnaireResponseStatus to use when filtering responses
            (defaults to QuestionnaireResponseStatus.COMPLETED)
        :param classification_types: List of QuestionnaireResponseClassificationTypes to filter results by
        :param created_start_datetime: Optional start date, if set only responses that were sent to
            the API after this date will be returned
        :param created_end_datetime: Optional end date, if set only responses that were sent to the
            API before this date will be returned
        :return: A dictionary keyed by participant ids with the value being the collection of responses for
            that participant
        """
        # Apply the documented defaults; `None` (rather than a mutable default
        # argument) keeps the defaults from being shared across calls.
        if sent_statuses is None:
            sent_statuses = [enums.QuestionnaireResponseStatus.COMPLETED]
        if classification_types is None:
            classification_types = [enums.QuestionnaireResponseClassificationType.COMPLETE]

        # Build query for all the questions answered by the given participants for the given survey codes.
        # Two aliases of Code are needed because the question code and the survey
        # (module) code are joined from different tables.
        question_code = aliased(Code)
        survey_code = aliased(Code)
        query = (
            session.query(
                func.lower(question_code.value),
                QuestionnaireResponse.participantId,
                QuestionnaireResponse.questionnaireResponseId,
                QuestionnaireResponse.authored,
                survey_code.value,
                QuestionnaireResponseAnswer,
                QuestionnaireResponse.status
            )
            .select_from(QuestionnaireResponseAnswer)
            .join(QuestionnaireQuestion)
            .join(QuestionnaireResponse)
            .join(
                Participant,
                Participant.participantId == QuestionnaireResponse.participantId
            )
            .join(question_code, question_code.codeId == QuestionnaireQuestion.codeId)
            .join(
                QuestionnaireConcept,
                # The concept is matched on both questionnaire id and version so the
                # survey code reflects the exact questionnaire version answered.
                and_(
                    QuestionnaireConcept.questionnaireId == QuestionnaireResponse.questionnaireId,
                    QuestionnaireConcept.questionnaireVersion == QuestionnaireResponse.questionnaireVersion
                )
            ).join(survey_code, survey_code.codeId == QuestionnaireConcept.codeId)
            # Eager-load each answer's code to avoid per-answer lazy loads later.
            .options(joinedload(QuestionnaireResponseAnswer.code))
            .filter(
                QuestionnaireResponse.status.in_(sent_statuses),
                QuestionnaireResponse.classificationType.in_(classification_types),
                Participant.isTestParticipant != 1
            )
        )
        if survey_codes:
            query = query.filter(
                survey_code.value.in_(survey_codes)
            )
        if participant_ids:
            query = query.filter(
                QuestionnaireResponse.participantId.in_(participant_ids)
            )
        if not include_ignored_answers:
            # Answers flagged as ignored are excluded; NULL is treated the same
            # as not-ignored.
            query = query.filter(
                or_(
                    QuestionnaireResponseAnswer.ignore.is_(False),
                    QuestionnaireResponseAnswer.ignore.is_(None)
                )
            )
        if created_start_datetime:
            # The index hint steers MySQL toward the created-date index for
            # date-bounded queries.
            query = query.filter(
                QuestionnaireResponse.created >= created_start_datetime
            ).with_hint(
                QuestionnaireResponse,
                'USE INDEX (idx_created_q_id)'
            )
        if created_end_datetime:
            query = query.filter(
                QuestionnaireResponse.created <= created_end_datetime
            ).with_hint(
                QuestionnaireResponse,
                'USE INDEX (idx_created_q_id)'
            )

        # build dict with participant ids as keys and ParticipantResponse objects as values
        participant_response_map = defaultdict(response_domain_model.ParticipantResponses)
        for question_code_str, participant_id, response_id, authored_datetime, survey_code_str, answer, \
                status in query.all():
            # Get the collection of responses for the participant
            response_collection_for_participant = participant_response_map[participant_id]

            # Get the response that this particular answer is for so we can store the answer
            response = response_collection_for_participant.responses.get(response_id)
            if not response:
                # This is the first time seeing an answer for this response, so create the Response structure for it
                response = response_domain_model.Response(
                    id=response_id,
                    survey_code=survey_code_str,
                    authored_datetime=authored_datetime,
                    status=status
                )
                response_collection_for_participant.responses[response_id] = response

            # Question codes were lower-cased by the query, so lookups by
            # question code are case-insensitive.
            response.answered_codes[question_code_str].append(
                response_domain_model.Answer.from_db_model(answer)
            )

        return dict(participant_response_map)

    @classmethod
    def get_interest_in_sharing_ehr_ranges(cls, participant_id: int, session: Session) -> List[DateRange]:
        """Return the date ranges during which the participant expressed interest
        in sharing EHR data (via DV_EHR) or consented to sharing (via EHR consent).

        Ranges open when a "yes" answer is seen and close on a subsequent
        non-"yes" answer or an EHR-consent-expired answer; a range still open
        after the last response is returned without an end date.
        """
        # Load all EHR and DV_EHR responses
        sharing_response_list = cls.get_responses_to_surveys(
            session=session,
            survey_codes=[
                code_constants.CONSENT_FOR_DVEHR_MODULE,
                code_constants.CONSENT_FOR_ELECTRONIC_HEALTH_RECORDS_MODULE
            ],
            participant_ids=[participant_id]
        ).get(participant_id)

        # Find all ranges where interest in sharing EHR was expressed (DV_EHR) or consent to share was provided.
        # `current_date_range` holds the currently-open range (if any) as responses
        # are walked in authored order.
        ehr_interest_date_ranges = []
        if sharing_response_list:
            current_date_range = None
            for response in sharing_response_list.in_authored_order:
                dv_interest_answer = response.get_single_answer_for(code_constants.DVEHR_SHARING_QUESTION_CODE)
                # TODO: check if answer is null, and use a safe version of get_single_answer
                # NOTE(review): `.value.lower()` below assumes answer values are
                # non-null strings — confirm before relying on this for all data.
                if dv_interest_answer:
                    if (
                        dv_interest_answer.value.lower() == code_constants.DVEHRSHARING_CONSENT_CODE_YES.lower()
                        and current_date_range is None
                    ):
                        # A "yes" opens a new range (if one isn't already open).
                        current_date_range = DateRange(start=response.authored_datetime)
                    if (
                        dv_interest_answer.value.lower() != code_constants.DVEHRSHARING_CONSENT_CODE_YES.lower()
                        and current_date_range is not None
                    ):
                        # Any non-"yes" closes the open range.
                        current_date_range.end = response.authored_datetime
                        ehr_interest_date_ranges.append(current_date_range)
                        current_date_range = None

                # Same open/close logic for the EHR consent question.
                consent_answer = response.get_single_answer_for(code_constants.EHR_CONSENT_QUESTION_CODE)
                if consent_answer:
                    if (
                        consent_answer.value.lower() == code_constants.CONSENT_PERMISSION_YES_CODE.lower()
                        and current_date_range is None
                    ):
                        current_date_range = DateRange(start=response.authored_datetime)
                    if (
                        consent_answer.value.lower() != code_constants.CONSENT_PERMISSION_YES_CODE.lower()
                        and current_date_range is not None
                    ):
                        current_date_range.end = response.authored_datetime
                        ehr_interest_date_ranges.append(current_date_range)
                        current_date_range = None

                # An explicit consent-expired answer also closes the open range.
                expire_answer = response.get_single_answer_for(code_constants.EHR_CONSENT_EXPIRED_QUESTION_CODE)
                if (
                    expire_answer
                    and expire_answer.value.lower() == code_constants.EHR_CONSENT_EXPIRED_YES
                    and current_date_range
                ):
                    current_date_range.end = response.authored_datetime
                    ehr_interest_date_ranges.append(current_date_range)
                    current_date_range = None

            # A range left open after the last response means interest is ongoing.
            if current_date_range is not None:
                ehr_interest_date_ranges.append(current_date_range)

        return ehr_interest_date_ranges
| bsd-3-clause | 210b3bfa5c674c3c2c402d5a30d22901 | 46.937799 | 116 | 0.616529 | 4.642725 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/69453413dfc3_change_site_to_unicode_text.py | 1 | 1720 | """change site to unicode text
Revision ID: 69453413dfc3
Revises: 0a4ccc37472a
Create Date: 2018-07-11 14:20:54.392997
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "69453413dfc3"
down_revision = "0a4ccc37472a"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. ``upgrade_rdr``)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. ``downgrade_rdr``)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Widen the site free-text columns from VARCHAR(1024) to unicode TEXT."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ("directions", "notes", "notes_es"):
        op.alter_column(
            "site",
            column_name,
            existing_type=sa.String(1024),
            type_=sa.UnicodeText(),
            existing_nullable=True,
        )
    # ### end Alembic commands ###
def downgrade_rdr():
    """Revert the site free-text columns from unicode TEXT back to VARCHAR(1024)."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ("directions", "notes", "notes_es"):
        op.alter_column(
            "site",
            column_name,
            existing_type=sa.UnicodeText(),
            type_=sa.String(1024),
            existing_nullable=True,
        )
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 03e4c906fc1a244321d6a66a8c2980ec | 28.152542 | 118 | 0.672093 | 3.460765 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/measurereport_tests.py | 1 | 30095 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import measurereport
from .fhirdate import FHIRDate
class MeasureReportTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("MeasureReport", js["resourceType"])
return measurereport.MeasureReport(js)
def testMeasureReport1(self):
inst = self.instantiate_from("measurereport-cms146-cat1-example.json")
self.assertIsNotNone(inst, "Must have instantiated a MeasureReport instance")
self.implMeasureReport1(inst)
js = inst.as_json()
self.assertEqual("MeasureReport", js["resourceType"])
inst2 = measurereport.MeasureReport(js)
self.implMeasureReport1(inst2)
    def implMeasureReport1(self, inst):
        """Assert the expected field values of the CMS146 cat1 (individual) example."""
        self.assertEqual(inst.contained[0].id, "reporter")
        self.assertEqual(inst.date.date, FHIRDate("2014-04-01").date)
        self.assertEqual(inst.date.as_json(), "2014-04-01")
        # Overall group population counts
        self.assertEqual(inst.group[0].id, "CMS146-group-1")
        self.assertEqual(inst.group[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].population[0].count, 1)
        self.assertEqual(inst.group[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].population[1].count, 1)
        self.assertEqual(inst.group[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].population[2].count, 1)
        self.assertEqual(inst.group[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].population[3].count, 0)
        # Stratifier: ages up to 9 (strata "true"/"false")
        self.assertEqual(inst.group[0].stratifier[0].code[0].text, "stratifier-ages-up-to-9")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].count, 1)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].count, 1)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].count, 1)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].value.text, "false")
        # Stratifier: ages 10 and older (strata "true"/"false")
        self.assertEqual(inst.group[0].stratifier[1].code[0].text, "stratifier-ages-10-plus")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].count, 1)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].count, 1)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].count, 1)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].value.text, "false")
        # Stratifier: gender (strata male/female/other/unknown)
        self.assertEqual(inst.group[0].stratifier[2].code[0].text, "stratifier-gender")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].count, 1)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].count, 1)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].count, 1)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].value.text, "male")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].value.text, "female")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].value.text, "other")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].value.text, "unknown")
        # Report-level metadata
        self.assertEqual(inst.id, "measurereport-cms146-cat1-example")
        self.assertEqual(inst.identifier[0].value, "measurereport-cms146-cat1-example-2017-03-13")
        self.assertEqual(inst.measure, "Measure/CMS146")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.period.end.date, FHIRDate("2014-03-31").date)
        self.assertEqual(inst.period.end.as_json(), "2014-03-31")
        self.assertEqual(inst.period.start.date, FHIRDate("2014-01-01").date)
        self.assertEqual(inst.period.start.as_json(), "2014-01-01")
        self.assertEqual(inst.status, "complete")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type, "individual")
def testMeasureReport2(self):
inst = self.instantiate_from("measurereport-cms146-cat2-example.json")
self.assertIsNotNone(inst, "Must have instantiated a MeasureReport instance")
self.implMeasureReport2(inst)
js = inst.as_json()
self.assertEqual("MeasureReport", js["resourceType"])
inst2 = measurereport.MeasureReport(js)
self.implMeasureReport2(inst2)
    def implMeasureReport2(self, inst):
        """Assert the expected field values of the CMS146 cat2 (subject-list) example."""
        self.assertEqual(inst.contained[0].id, "reporter")
        self.assertEqual(inst.date.date, FHIRDate("2014-04-01").date)
        self.assertEqual(inst.date.as_json(), "2014-04-01")
        # Overall group population counts
        self.assertEqual(inst.group[0].id, "CMS146-group-1")
        self.assertEqual(inst.group[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].population[0].count, 500)
        self.assertEqual(inst.group[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].population[1].count, 200)
        self.assertEqual(inst.group[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].population[2].count, 500)
        self.assertEqual(inst.group[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].population[3].count, 100)
        # Stratifier: ages up to 9 (strata "true"/"false")
        self.assertEqual(inst.group[0].stratifier[0].code[0].text, "stratifier-ages-up-to-9")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[0].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[0].stratum[1].value.text, "false")
        # Stratifier: ages 10 and older (strata "true"/"false")
        self.assertEqual(inst.group[0].stratifier[1].code[0].text, "stratifier-ages-10-plus")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[1].stratum[0].value.text, "true")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[1].stratum[1].value.text, "false")
        # Stratifier: gender (strata male/female/other/unknown)
        self.assertEqual(inst.group[0].stratifier[2].code[0].text, "stratifier-gender")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[2].stratum[0].value.text, "male")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].count, 100)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].count, 250)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].count, 50)
        self.assertEqual(inst.group[0].stratifier[2].stratum[1].value.text, "female")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].code.coding[0].code, "denominator-exclusion")
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[2].value.text, "other")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].code.coding[0].code, "initial-population")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].code.coding[0].code, "numerator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].code.coding[0].code, "denominator")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].count, 0)
        # NOTE(review): the example data here uses the plural code
        # "denominator-exclusions", unlike the singular form used everywhere
        # else in this file — kept as-is to match the fixture.
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].code.coding[0].code, "denominator-exclusions")
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].count, 0)
        self.assertEqual(inst.group[0].stratifier[2].stratum[3].value.text, "unknown")
        # Report-level metadata
        self.assertEqual(inst.id, "measurereport-cms146-cat2-example")
        self.assertEqual(inst.identifier[0].value, "measurereport-cms146-cat2-example-2017-03-13")
        self.assertEqual(inst.measure, "Measure/CMS146")
        self.assertEqual(inst.meta.tag[0].code, "HTEST")
        self.assertEqual(inst.meta.tag[0].display, "test health data")
        self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
        self.assertEqual(inst.period.end.date, FHIRDate("2014-03-31").date)
        self.assertEqual(inst.period.end.as_json(), "2014-03-31")
        self.assertEqual(inst.period.start.date, FHIRDate("2014-01-01").date)
        self.assertEqual(inst.period.start.as_json(), "2014-01-01")
        self.assertEqual(inst.status, "complete")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type, "subject-list")
def testMeasureReport3(self):
inst = self.instantiate_from("measurereport-cms146-cat3-example.json")
self.assertIsNotNone(inst, "Must have instantiated a MeasureReport instance")
self.implMeasureReport3(inst)
js = inst.as_json()
self.assertEqual("MeasureReport", js["resourceType"])
inst2 = measurereport.MeasureReport(js)
self.implMeasureReport3(inst2)
def implMeasureReport3(self, inst):
self.assertEqual(inst.contained[0].id, "reporter")
self.assertEqual(inst.date.date, FHIRDate("2014-04-01").date)
self.assertEqual(inst.date.as_json(), "2014-04-01")
self.assertEqual(inst.group[0].id, "CMS146-group-1")
self.assertEqual(inst.group[0].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].population[0].count, 500)
self.assertEqual(inst.group[0].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].population[1].count, 200)
self.assertEqual(inst.group[0].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].population[2].count, 500)
self.assertEqual(inst.group[0].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].population[3].count, 100)
self.assertEqual(inst.group[0].stratifier[0].code[0].text, "stratifier-ages-up-to-9")
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[0].count, 250)
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[1].count, 100)
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[2].count, 250)
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[0].stratum[0].population[3].count, 50)
self.assertEqual(inst.group[0].stratifier[0].stratum[0].value.text, "true")
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[0].count, 250)
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[1].count, 100)
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[2].count, 250)
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[0].stratum[1].population[3].count, 50)
self.assertEqual(inst.group[0].stratifier[0].stratum[1].value.text, "false")
self.assertEqual(inst.group[0].stratifier[1].code[0].text, "stratifier-ages-10-plus")
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[0].count, 250)
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[1].count, 100)
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[2].count, 250)
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[1].stratum[0].population[3].count, 50)
self.assertEqual(inst.group[0].stratifier[1].stratum[0].value.text, "true")
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[0].count, 250)
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[1].count, 100)
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[2].count, 250)
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[1].stratum[1].population[3].count, 50)
self.assertEqual(inst.group[0].stratifier[1].stratum[1].value.text, "false")
self.assertEqual(inst.group[0].stratifier[2].code[0].text, "stratifier-gender")
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[0].count, 250)
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[1].count, 100)
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[2].count, 250)
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[2].stratum[0].population[3].count, 50)
self.assertEqual(inst.group[0].stratifier[2].stratum[0].value.text, "male")
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[0].count, 250)
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[1].count, 100)
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[2].count, 250)
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[2].stratum[1].population[3].count, 50)
self.assertEqual(inst.group[0].stratifier[2].stratum[1].value.text, "female")
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[0].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[1].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[2].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[2].stratum[2].population[3].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[2].value.text, "other")
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].code.coding[0].code, "initial-population")
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[0].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].code.coding[0].code, "numerator")
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[1].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].code.coding[0].code, "denominator")
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[2].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].code.coding[0].code, "denominator-exclusion")
self.assertEqual(inst.group[0].stratifier[2].stratum[3].population[3].count, 0)
self.assertEqual(inst.group[0].stratifier[2].stratum[3].value.text, "unknown")
self.assertEqual(inst.id, "measurereport-cms146-cat3-example")
self.assertEqual(inst.identifier[0].value, "measurereport-cms146-cat3-example-2017-03-13")
self.assertEqual(inst.measure, "Measure/CMS146")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.period.end.date, FHIRDate("2014-03-31").date)
self.assertEqual(inst.period.end.as_json(), "2014-03-31")
self.assertEqual(inst.period.start.date, FHIRDate("2014-01-01").date)
self.assertEqual(inst.period.start.as_json(), "2014-01-01")
self.assertEqual(inst.status, "complete")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type, "summary")
| bsd-3-clause | 521f7138490084910991299302fe4ecf | 82.830084 | 124 | 0.698056 | 3.175918 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/goal.py | 1 | 8483 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Goal) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class Goal(domainresource.DomainResource):
    """ Describes the intended objective(s) for a patient, group or organization.

    Describes the intended objective(s) for a patient, group or organization
    care, for example, weight loss, restoring an activity of daily living,
    obtaining herd immunity via immunization, meeting a process improvement
    objective, etc.
    """

    # Machine-generated FHIR STU3 (3.0.0) resource model; the attribute
    # docstrings below mirror the specification's field descriptions.
    resource_type = "Goal"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.addresses = None
        """ Issues addressed by this goal.
        List of `FHIRReference` items referencing `Condition, Observation, MedicationStatement, NutritionOrder, ProcedureRequest, RiskAssessment` (represented as `dict` in JSON). """

        self.category = None
        """ E.g. Treatment, dietary, behavioral, etc..
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.description = None
        """ Code or text describing goal.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.expressedBy = None
        """ Who's responsible for creating Goal?.
        Type `FHIRReference` referencing `Patient, Practitioner, RelatedPerson` (represented as `dict` in JSON). """

        self.identifier = None
        """ External Ids for this goal.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.note = None
        """ Comments about the goal.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.outcomeCode = None
        """ What result was achieved regarding the goal?.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.outcomeReference = None
        """ Observation that resulted from goal.
        List of `FHIRReference` items referencing `Observation` (represented as `dict` in JSON). """

        self.priority = None
        """ high-priority | medium-priority | low-priority.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.startCodeableConcept = None
        """ When goal pursuit begins.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.startDate = None
        """ When goal pursuit begins.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.status = None
        """ proposed | accepted | planned | in-progress | on-target | ahead-of-
        target | behind-target | sustaining | achieved | on-hold |
        cancelled | entered-in-error | rejected.
        Type `str`. """

        self.statusDate = None
        """ When goal status took effect.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.statusReason = None
        """ Reason for current status.
        Type `str`. """

        self.subject = None
        """ Who this goal is intended for.
        Type `FHIRReference` referencing `Patient, Group, Organization` (represented as `dict` in JSON). """

        self.target = None
        """ Target outcome for the goal.
        Type `GoalTarget` (represented as `dict` in JSON). """

        super(Goal, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout: (attr_name, json_name, type, is_list, one_of_many, required).
        # The "start" marker groups startCodeableConcept/startDate as the
        # mutually exclusive FHIR choice element start[x].
        js = super(Goal, self).elementProperties()
        js.extend([
            ("addresses", "addresses", fhirreference.FHIRReference, True, None, False),
            ("category", "category", codeableconcept.CodeableConcept, True, None, False),
            ("description", "description", codeableconcept.CodeableConcept, False, None, True),
            ("expressedBy", "expressedBy", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("outcomeCode", "outcomeCode", codeableconcept.CodeableConcept, True, None, False),
            ("outcomeReference", "outcomeReference", fhirreference.FHIRReference, True, None, False),
            ("priority", "priority", codeableconcept.CodeableConcept, False, None, False),
            ("startCodeableConcept", "startCodeableConcept", codeableconcept.CodeableConcept, False, "start", False),
            ("startDate", "startDate", fhirdate.FHIRDate, False, "start", False),
            ("status", "status", str, False, None, True),
            ("statusDate", "statusDate", fhirdate.FHIRDate, False, None, False),
            ("statusReason", "statusReason", str, False, None, False),
            ("subject", "subject", fhirreference.FHIRReference, False, None, False),
            ("target", "target", GoalTarget, False, None, False),
        ])
        return js
from . import backboneelement
class GoalTarget(backboneelement.BackboneElement):
    """ Target outcome for the goal.

    Indicates what should be done by when.
    """

    resource_type = "GoalTarget"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.detailCodeableConcept = None
        """ The target value to be achieved.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.detailQuantity = None
        """ The target value to be achieved.
        Type `Quantity` (represented as `dict` in JSON). """

        self.detailRange = None
        """ The target value to be achieved.
        Type `Range` (represented as `dict` in JSON). """

        self.dueDate = None
        """ Reach goal on or before.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.dueDuration = None
        """ Reach goal on or before.
        Type `Duration` (represented as `dict` in JSON). """

        self.measure = None
        """ The parameter whose value is being tracked.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(GoalTarget, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout: (attr_name, json_name, type, is_list, one_of_many, required).
        # "detail" and "due" group the FHIR choice elements detail[x] and due[x];
        # at most one member of each group may be populated.
        js = super(GoalTarget, self).elementProperties()
        js.extend([
            ("detailCodeableConcept", "detailCodeableConcept", codeableconcept.CodeableConcept, False, "detail", False),
            ("detailQuantity", "detailQuantity", quantity.Quantity, False, "detail", False),
            ("detailRange", "detailRange", range.Range, False, "detail", False),
            ("dueDate", "dueDate", fhirdate.FHIRDate, False, "due", False),
            ("dueDuration", "dueDuration", duration.Duration, False, "due", False),
            ("measure", "measure", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
| bsd-3-clause | 7851ce41ac5928063ee32b16d48b60aa | 39.588517 | 182 | 0.618649 | 4.254263 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/model/questionnaire_response.py | 1 | 8091 | from sqlalchemy import (
Boolean,
Column,
Date,
Float,
ForeignKey,
ForeignKeyConstraint,
Index,
Integer,
String,
Text
)
from sqlalchemy import BLOB # pylint: disable=unused-import
from sqlalchemy.orm import relationship
from sqlalchemy.sql import text
from typing import List
from rdr_service.model.base import Base
from rdr_service.model.utils import EnumZeroBased, UTCDateTime
from rdr_service.model.field_types import BlobUTF8
from rdr_service.participant_enums import QuestionnaireResponseStatus, QuestionnaireResponseClassificationType
class QuestionnaireResponse(Base):
    """A response to a questionnaire for a participant. Contains answers to questions found in the
    questionnaire."""

    __tablename__ = "questionnaire_response"
    questionnaireResponseId = Column("questionnaire_response_id", Integer, primary_key=True, autoincrement=False)
    """RDR identifier for the response"""
    questionnaireId = Column("questionnaire_id", Integer, nullable=False)
    """RDR identifier for the questionnaire"""
    questionnaireVersion = Column("questionnaire_version", Integer, nullable=False)
    """RDR's version number of the questionnaire"""
    questionnaireSemanticVersion = Column('questionnaire_semantic_version', String(100))
    """PTSC's version of the questionnaire (does not necessarily match RDR version)"""
    participantId = Column("participant_id", Integer, ForeignKey("participant.participant_id"), nullable=False)
    """Identifier for the participant that responded to the questionnaire"""
    # Set when the response was authored on behalf of the participant rather than by them.
    nonParticipantAuthor = Column("non_participant_author", String(80), nullable=True)
    created = Column("created", UTCDateTime, nullable=False)
    """The date and time the RDR API received the questionnaire response"""
    authored = Column("authored", UTCDateTime, nullable=True)
    """The actual time the participant completed the questionnaire"""
    language = Column("language", String(2), nullable=True)
    """Language that the response was completed in"""
    # Can be used to indicate equality between sets of answers
    answerHash = Column('answer_hash', String(32), nullable=True)
    """@rdr_dictionary_internal_column"""
    externalId = Column('external_id', String(30), nullable=True)
    """@rdr_dictionary_internal_column"""
    classificationType = Column('classification_type',
                                EnumZeroBased(QuestionnaireResponseClassificationType),
                                nullable=True,  # To remain consistent with existing column definition and enable op.alter_column
                                default=QuestionnaireResponseClassificationType.COMPLETE,
                                server_default=text(str(int(QuestionnaireResponseClassificationType.COMPLETE))))
    """ Classification of a response (e.g., COMPLETE or DUPLICATE) which can determine if it should be ignored """
    # Raw FHIR QuestionnaireResponse JSON payload as received.
    resource = Column("resource", BlobUTF8, nullable=False)
    status = Column(
        EnumZeroBased(QuestionnaireResponseStatus),
        nullable=False,
        default=QuestionnaireResponseStatus.COMPLETED,
        server_default=text(str(int(QuestionnaireResponseStatus.COMPLETED)))
    )
    # Child answers are deleted with the response (delete-orphan cascade).
    answers: List['QuestionnaireResponseAnswer'] = relationship(
        "QuestionnaireResponseAnswer", cascade="all, delete-orphan"
    )
    extensions = relationship('QuestionnaireResponseExtension')

    # Composite FK ties each response to the exact questionnaire version it answered.
    __table_args__ = (
        ForeignKeyConstraint(
            ["questionnaire_id", "questionnaire_version"],
            ["questionnaire_history.questionnaire_id", "questionnaire_history.version"],
        ),
        Index('idx_response_identifier_answers', externalId, answerHash),
        Index('idx_created_q_id', questionnaireId, created)
    )
class QuestionnaireResponseAnswer(Base):
    """An answer found in a questionnaire response. Note that there could be multiple answers to
    the same question, if the questionnaire allows for multiple answers.

    An answer is given to a particular question which has a particular concept code. The answer is
    the current answer for a participant from the time period between its parent response's creation
    field and the endTime field (or now, if endTime is not set.)

    When an answer is given by a participant in a questionnaire response, the endTime of any previous
    answers to questions with the same concept codes that don't have endTime set yet should have
    endTime set to the current time.
    """

    # This is the maximum # bytes that can be stored in a MySQL TEXT field, which
    # our field valueString should resolve to.
    # This value has no real effect; under the hood we're changing it to LONGBLOB in alembic/env.py which is 4GB
    VALUE_STRING_MAXLEN = 65535

    __tablename__ = "questionnaire_response_answer"
    questionnaireResponseAnswerId = Column("questionnaire_response_answer_id", Integer, primary_key=True)
    """RDR identifier for this answer"""
    questionnaireResponseId = Column(
        "questionnaire_response_id",
        Integer,
        ForeignKey("questionnaire_response.questionnaire_response_id"),
        nullable=False,
    )
    """RDR identifier for the response this answer is a part of"""
    questionId = Column(
        "question_id", Integer, ForeignKey("questionnaire_question.questionnaire_question_id"), nullable=False
    )
    """Question that this answer is a response to"""
    endTime = Column("end_time", UTCDateTime)
    """The time at which the participant completed another response to the survey, making this answer obsolete"""
    # The value* columns below mirror the FHIR value[x] choice: exactly one of
    # them is expected to be populated per answer, depending on the answer type.
    valueSystem = Column("value_system", String(50))
    """The code system used for the response value"""
    valueCodeId = Column("value_code_id", Integer, ForeignKey("code.code_id"))
    """The code used for the response value"""
    valueBoolean = Column("value_boolean", Boolean)
    """When the response to the question is true or false"""
    valueDecimal = Column("value_decimal", Float)
    """When the response to the question is a rational number that has a decimal representation"""
    valueInteger = Column("value_integer", Integer)
    """When the response is a signed integer"""
    valueString = Column("value_string", Text)
    """When the response is a sequence of Unicode characters"""
    valueDate = Column("value_date", Date)
    """
    When the response is a date, or partial date (e.g. just year or year + month) as used in human communication.
    There SHALL be no time zone. Dates SHALL be valid dates.
    """
    valueDateTime = Column("value_datetime", UTCDateTime)
    """
    When the response is a date, date-time or partial date (e.g. just year or year + month)
    as used in human communication.
    """
    valueUri = Column("value_uri", String(1024))
    """
    When the response is a Uniform Resource Identifier Reference (RFC 3986 ).
    Note: URIs are case sensitive. For UUID (urn:uuid:53fefa32-fcbb-4ff8-8a92-55ee120877b7) use all lowercase
    """
    ignore = Column(Boolean)
    """
    A boolean (1 or 0) value indicating whether the answer should be ignored or not. This is used in
    special circumstances to prevent mis-configured or otherwise invalid answers from being passed downstream.
    See the `ignore_reason` column for notes on why a given answer is ignored.
    """
    ignore_reason = Column(String(300))
    """
    A note on the reason why a particular answer is being ignored. Will ideally link to further documentation
    that can provide further context on the reason for ignoring the answer.
    """
    # Code object for valueCodeId (coded answers only).
    code = relationship("Code")
class QuestionnaireResponseExtension(Base):
    """
    Extension object provided with a questionnaire response payload, fields derived from the FHIR 1.0.6 standard
    """

    __tablename__ = "questionnaire_response_extension"
    id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
    # Response this extension was submitted with.
    questionnaireResponseId = Column(
        "questionnaire_response_id",
        Integer,
        ForeignKey(QuestionnaireResponse.questionnaireResponseId),
        nullable=False
    )
    # FHIR extension pattern: `url` identifies the extension's meaning, and the
    # value* columns carry its payload (at most one is expected to be set).
    url = Column(String(1024))
    valueCode = Column('value_code', String(512))
    valueString = Column('value_string', String(512))
| bsd-3-clause | ef49c42cdfe53d3464b3f2788ea14754 | 45.768786 | 114 | 0.721542 | 4.24279 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/riskevidencesynthesis.py | 1 | 18420 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/RiskEvidenceSynthesis) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class RiskEvidenceSynthesis(domainresource.DomainResource):
    """ A quantified estimate of risk based on a body of evidence.

    The RiskEvidenceSynthesis resource describes the likelihood of an outcome
    in a population plus exposure state where the risk estimate is derived from
    a combination of research studies.
    """

    # Machine-generated FHIR R4 (4.0.0) resource model; the attribute
    # docstrings below mirror the specification's field descriptions.
    resource_type = "RiskEvidenceSynthesis"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.approvalDate = None
        """ When the risk evidence synthesis was approved by publisher.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.author = None
        """ Who authored the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.certainty = None
        """ How certain is the risk.
        List of `RiskEvidenceSynthesisCertainty` items (represented as `dict` in JSON). """

        self.contact = None
        """ Contact details for the publisher.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.copyright = None
        """ Use and/or publishing restrictions.
        Type `str`. """

        self.date = None
        """ Date last changed.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.description = None
        """ Natural language description of the risk evidence synthesis.
        Type `str`. """

        self.editor = None
        """ Who edited the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.effectivePeriod = None
        """ When the risk evidence synthesis is expected to be used.
        Type `Period` (represented as `dict` in JSON). """

        self.endorser = None
        """ Who endorsed the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.exposure = None
        """ What exposure?.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.identifier = None
        """ Additional identifier for the risk evidence synthesis.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.jurisdiction = None
        """ Intended jurisdiction for risk evidence synthesis (if applicable).
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.lastReviewDate = None
        """ When the risk evidence synthesis was last reviewed.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.name = None
        """ Name for this risk evidence synthesis (computer friendly).
        Type `str`. """

        self.note = None
        """ Used for footnotes or explanatory notes.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.outcome = None
        """ What outcome?.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.population = None
        """ What population?.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.publisher = None
        """ Name of the publisher (organization or individual).
        Type `str`. """

        self.relatedArtifact = None
        """ Additional documentation, citations, etc..
        List of `RelatedArtifact` items (represented as `dict` in JSON). """

        self.reviewer = None
        """ Who reviewed the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.riskEstimate = None
        """ What was the estimated risk.
        Type `RiskEvidenceSynthesisRiskEstimate` (represented as `dict` in JSON). """

        self.sampleSize = None
        """ What sample size was involved?.
        Type `RiskEvidenceSynthesisSampleSize` (represented as `dict` in JSON). """

        self.status = None
        """ draft | active | retired | unknown.
        Type `str`. """

        self.studyType = None
        """ Type of study.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.synthesisType = None
        """ Type of synthesis.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.title = None
        """ Name for this risk evidence synthesis (human friendly).
        Type `str`. """

        self.topic = None
        """ The category of the EffectEvidenceSynthesis, such as Education,
        Treatment, Assessment, etc..
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.url = None
        """ Canonical identifier for this risk evidence synthesis, represented
        as a URI (globally unique).
        Type `str`. """

        self.useContext = None
        """ The context that the content is intended to support.
        List of `UsageContext` items (represented as `dict` in JSON). """

        self.version = None
        """ Business version of the risk evidence synthesis.
        Type `str`. """

        super(RiskEvidenceSynthesis, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout: (attr_name, json_name, type, is_list, one_of_many, required).
        # outcome, population and status are the only required elements.
        js = super(RiskEvidenceSynthesis, self).elementProperties()
        js.extend([
            ("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
            ("author", "author", contactdetail.ContactDetail, True, None, False),
            ("certainty", "certainty", RiskEvidenceSynthesisCertainty, True, None, False),
            ("contact", "contact", contactdetail.ContactDetail, True, None, False),
            ("copyright", "copyright", str, False, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, False),
            ("description", "description", str, False, None, False),
            ("editor", "editor", contactdetail.ContactDetail, True, None, False),
            ("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
            ("endorser", "endorser", contactdetail.ContactDetail, True, None, False),
            ("exposure", "exposure", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
            ("name", "name", str, False, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("outcome", "outcome", fhirreference.FHIRReference, False, None, True),
            ("population", "population", fhirreference.FHIRReference, False, None, True),
            ("publisher", "publisher", str, False, None, False),
            ("relatedArtifact", "relatedArtifact", relatedartifact.RelatedArtifact, True, None, False),
            ("reviewer", "reviewer", contactdetail.ContactDetail, True, None, False),
            ("riskEstimate", "riskEstimate", RiskEvidenceSynthesisRiskEstimate, False, None, False),
            ("sampleSize", "sampleSize", RiskEvidenceSynthesisSampleSize, False, None, False),
            ("status", "status", str, False, None, True),
            ("studyType", "studyType", codeableconcept.CodeableConcept, False, None, False),
            ("synthesisType", "synthesisType", codeableconcept.CodeableConcept, False, None, False),
            ("title", "title", str, False, None, False),
            ("topic", "topic", codeableconcept.CodeableConcept, True, None, False),
            ("url", "url", str, False, None, False),
            ("useContext", "useContext", usagecontext.UsageContext, True, None, False),
            ("version", "version", str, False, None, False),
        ])
        return js
from . import backboneelement
class RiskEvidenceSynthesisCertainty(backboneelement.BackboneElement):
    """ How certain is the risk.

    A description of the certainty of the risk estimate.
    """

    resource_type = "RiskEvidenceSynthesisCertainty"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.certaintySubcomponent = None
        """ A component that contributes to the overall certainty.
        List of `RiskEvidenceSynthesisCertaintyCertaintySubcomponent` items (represented as `dict` in JSON). """

        self.note = None
        """ Used for footnotes or explanatory notes.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.rating = None
        """ Certainty rating.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        super(RiskEvidenceSynthesisCertainty, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout: (attr_name, json_name, type, is_list, one_of_many, required).
        js = super(RiskEvidenceSynthesisCertainty, self).elementProperties()
        js.extend([
            ("certaintySubcomponent", "certaintySubcomponent", RiskEvidenceSynthesisCertaintyCertaintySubcomponent, True, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("rating", "rating", codeableconcept.CodeableConcept, True, None, False),
        ])
        return js
class RiskEvidenceSynthesisCertaintyCertaintySubcomponent(backboneelement.BackboneElement):
    """ A component that contributes to the overall certainty.

    A description of a component of the overall certainty.
    """

    resource_type = "RiskEvidenceSynthesisCertaintyCertaintySubcomponent"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Used for footnotes or explanatory notes.
        # List of `Annotation` items (represented as `dict` in JSON).
        self.note = None

        # Subcomponent certainty rating.
        # List of `CodeableConcept` items (represented as `dict` in JSON).
        self.rating = None

        # Type of subcomponent of certainty rating.
        # Type `CodeableConcept` (represented as `dict` in JSON).
        self.type = None

        super().__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super().elementProperties()
        properties += [
            ("note", "note", annotation.Annotation, True, None, False),
            ("rating", "rating", codeableconcept.CodeableConcept, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
        ]
        return properties
class RiskEvidenceSynthesisRiskEstimate(backboneelement.BackboneElement):
    """ What was the estimated risk.

    The estimated risk of the outcome.
    """

    resource_type = "RiskEvidenceSynthesisRiskEstimate"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Sample size for group measured.
        # Type `int`.
        self.denominatorCount = None

        # Description of risk estimate.
        # Type `str`.
        self.description = None

        # Number with the outcome.
        # Type `int`.
        self.numeratorCount = None

        # How precise the estimate is.
        # List of `RiskEvidenceSynthesisRiskEstimatePrecisionEstimate` items (represented as `dict` in JSON).
        self.precisionEstimate = None

        # Type of risk estimate.
        # Type `CodeableConcept` (represented as `dict` in JSON).
        self.type = None

        # What unit is the outcome described in?.
        # Type `CodeableConcept` (represented as `dict` in JSON).
        self.unitOfMeasure = None

        # Point estimate.
        # Type `float`.
        self.value = None

        super().__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super().elementProperties()
        properties += [
            ("denominatorCount", "denominatorCount", int, False, None, False),
            ("description", "description", str, False, None, False),
            ("numeratorCount", "numeratorCount", int, False, None, False),
            ("precisionEstimate", "precisionEstimate", RiskEvidenceSynthesisRiskEstimatePrecisionEstimate, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
            ("unitOfMeasure", "unitOfMeasure", codeableconcept.CodeableConcept, False, None, False),
            ("value", "value", float, False, None, False),
        ]
        return properties
class RiskEvidenceSynthesisRiskEstimatePrecisionEstimate(backboneelement.BackboneElement):
    """ How precise the estimate is.

    A description of the precision of the estimate for the effect.
    """

    resource_type = "RiskEvidenceSynthesisRiskEstimatePrecisionEstimate"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Lower bound. Type `float`.
        # Named from_fhir because "from" is a Python keyword; serialized as "from".
        self.from_fhir = None

        # Level of confidence interval.
        # Type `float`.
        self.level = None

        # Upper bound.
        # Type `float`.
        self.to = None

        # Type of precision estimate.
        # Type `CodeableConcept` (represented as `dict` in JSON).
        self.type = None

        super().__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super().elementProperties()
        properties += [
            ("from_fhir", "from", float, False, None, False),
            ("level", "level", float, False, None, False),
            ("to", "to", float, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
        ]
        return properties
class RiskEvidenceSynthesisSampleSize(backboneelement.BackboneElement):
    """ What sample size was involved?.

    A description of the size of the sample involved in the synthesis.
    """

    resource_type = "RiskEvidenceSynthesisSampleSize"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Description of sample size.
        # Type `str`.
        self.description = None

        # How many participants?.
        # Type `int`.
        self.numberOfParticipants = None

        # How many studies?.
        # Type `int`.
        self.numberOfStudies = None

        super().__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super().elementProperties()
        properties += [
            ("description", "description", str, False, None, False),
            ("numberOfParticipants", "numberOfParticipants", int, False, None, False),
            ("numberOfStudies", "numberOfStudies", int, False, None, False),
        ]
        return properties
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| bsd-3-clause | b82bb51c19e92461b6e67f43caac3891 | 39.306346 | 135 | 0.617915 | 4.348442 | false | false | false | false |
django/django-localflavor | localflavor/sg/forms.py | 3 | 2857 | """Singapore-specific Form helpers."""
import re
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
# Matches an NRIC/FIN: a prefix letter (S, F, T or G), seven digits, and a checksum letter.
NRIC_FIN_RE = re.compile(r'^[SFTG](\d{7})[A-Z]$')
# Per-digit weights applied to the seven digits when computing the checksum
# (see the SGNRICFINField docstring for the full algorithm).
NRIC_FIN_DIGIT_WEIGHT = [2, 7, 6, 5, 4, 3, 2]
# Checksum letter lookup tables, indexed by (weighted sum % 11):
# one table for S/T prefixes, one for F/G prefixes.
NRIC_FIN_CHECKSUM_ST = ['J', 'Z', 'I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A']
NRIC_FIN_CHECKSUM_FG = ['X', 'W', 'U', 'T', 'R', 'Q', 'P', 'N', 'M', 'L', 'K']
class SGPostCodeField(RegexField):
    """A form field for Singapore postal codes: exactly six digits."""

    default_error_messages = {
        'invalid': _('Enter a 6-digit postal code.'),
    }

    def __init__(self, **kwargs):
        # Anchored pattern: the whole value must be six digits.
        super().__init__(r'^\d{6}$', **kwargs)
class SGNRICFINField(CharField):
    """
    A form field that validates input as a Singapore National Registration.

    Identity Card (NRIC) or Foreign Identification Number (FIN)
    Based on http://en.wikipedia.org/wiki/National_Registration_Identity_Card

    Checksum algorithm:
    1) Take for example I want to test the NRIC number S1234567.
       Multiply each digit by corresponding weight in this list [2,7,6,5,4,3,2]
       and add them together. So 1x2 + 2x7 + 3x6 + 4x5 + 5x4 + 6x3 + 7x2 = 106.
    2) If the first letter of the NRIC starts with T or G, add 4 to the total.
    3) Then you divide the number by 11 and get the remainder. 106/11=9r7
    4) You can get the alphabet depending on the IC type (the first letter in
       the IC) using the code below:
       S or T: 0=J, 1=Z, 2=I, 3=H, 4=G, 5=F, 6=E, 7=D, 8=C, 9=B, 10=A
       F or G: 0=X, 1=W, 2=U, 3=T, 4=R, 5=Q, 6=P, 7=N, 8=M, 9=L, 10=K
    """

    default_error_messages = {
        'invalid': _('Invalid NRIC/FIN.')
    }

    def clean(self, value):
        """
        Validate NRIC/FIN.

        Strips whitespace, upper-cases the value, checks its format against
        NRIC_FIN_RE and verifies the checksum letter. Returns the normalized
        value, or raises ValidationError when any check fails.
        """
        value = super().clean(value)
        if value in self.empty_values:
            return value

        # Normalize: upper-case and strip all whitespace before matching.
        normalized = re.sub(r'(\s+)', '', force_str(value.upper()))
        match = NRIC_FIN_RE.search(normalized)
        if match is None:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        normalized = match.group()

        # Weighted sum of the seven digits between prefix and checksum letter.
        weighted_total = 0
        for digit, weight in zip(normalized[1:-1], NRIC_FIN_DIGIT_WEIGHT):
            weighted_total += int(digit) * weight
        # T- and G-series numbers get an extra offset of 4.
        if normalized[0] in ('T', 'G'):
            weighted_total += 4

        # Pick the checksum table by prefix, then compare the expected letter.
        table = NRIC_FIN_CHECKSUM_ST if normalized[0] in ('S', 'T') else NRIC_FIN_CHECKSUM_FG
        if table[weighted_total % 11] != normalized[-1]:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return normalized
| bsd-3-clause | 762728af2151677fedb0d25374bc3e45 | 34.271605 | 81 | 0.583129 | 3.177976 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/api/metrics_api.py | 1 | 1756 | import datetime
import json
from flask import request
from flask_restful import Resource
from werkzeug.exceptions import BadRequest
from rdr_service import app_util
from rdr_service.api_util import HEALTHPRO
from rdr_service.dao.metrics_dao import MetricsBucketDao
# Expected format of the start_date/end_date values in the request payload.
DATE_FORMAT = "%Y-%m-%d"
# Maximum allowed span, in days, between start_date and end_date.
DAYS_LIMIT = 7
class MetricsApi(Resource):
    """Metrics endpoint: returns active metrics buckets for a date range."""

    @app_util.auth_required(HEALTHPRO)
    def post(self):
        """Return the metrics buckets active between the requested dates.

        Expects a JSON body with ``start_date`` and ``end_date`` strings in
        YYYY-MM-DD format, at most DAYS_LIMIT days apart.

        :raises BadRequest: on an empty body, missing/invalid dates, or a
            date span larger than DAYS_LIMIT days.
        """
        resource = request.get_data()
        if not resource:
            # Guard clause: reject an empty request body up front.
            raise BadRequest("Request data is empty")

        resource_json = json.loads(resource)
        start_date_str = resource_json.get("start_date")
        end_date_str = resource_json.get("end_date")
        if not start_date_str or not end_date_str:
            raise BadRequest("Start date and end date should not be empty")

        start_date = self._parse_date(start_date_str, "start")
        end_date = self._parse_date(end_date_str, "end")

        if abs((end_date - start_date).days) > DAYS_LIMIT:
            # Single-line literal: the original backslash-continued f-string
            # embedded a run of indentation whitespace in the error message.
            raise BadRequest(
                f"Difference between start date and end date should not be greater than {DAYS_LIMIT} days"
            )

        # Only construct the DAO once the request has validated.
        dao = MetricsBucketDao()
        buckets = dao.get_active_buckets(start_date, end_date)
        if buckets is None:
            return []
        return [dao.to_client_json(bucket) for bucket in buckets]

    @staticmethod
    def _parse_date(date_str, label):
        """Parse a YYYY-MM-DD string into a date.

        :param date_str: the raw date string from the request payload.
        :param label: "start" or "end", used in the error message.
        :raises BadRequest: if the string does not match DATE_FORMAT.
        """
        try:
            return datetime.datetime.strptime(date_str, DATE_FORMAT).date()
        except ValueError:
            raise BadRequest(f"Invalid {label} date: {date_str}")
| bsd-3-clause | a90a6a8059b59fa80a4ff24af8b8d8ab | 37.173913 | 91 | 0.603075 | 4.064815 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/model/bq_pdr_participant_summary.py | 1 | 29463 | #
# BigQuery schemas for PDR that do not contain PII.
#
#
from rdr_service.model.bq_base import BQTable, BQSchema, BQView, BQField, BQFieldTypeEnum, BQFieldModeEnum, \
BQRecordField
from rdr_service.model.bq_participant_summary import (
BQRaceSchema,
BQGenderSchema,
BQModuleStatusSchema,
BQConsentSchema,
BQPatientStatusSchema,
BQBiobankOrderSchema,
BQPairingHistorySchema,
BQSexualOrientationSchema
)
class BQPDRPhysicalMeasurements(BQSchema):
    """
    PDR Participant Physical Measurements

    Nested (repeated) record schema describing one physical measurements
    record for a participant. Paired *_id fields hold the enum integer value
    for the corresponding string field.
    """
    pm_status = BQField('pm_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    pm_status_id = BQField('pm_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    pm_finalized = BQField('pm_finalized', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    pm_physical_measurements_id = BQField('pm_physical_measurements_id', BQFieldTypeEnum.INTEGER,
                                          BQFieldModeEnum.NULLABLE)
    pm_amended_measurements_id = BQField('pm_amended_measurements_id', BQFieldTypeEnum.INTEGER,
                                         BQFieldModeEnum.NULLABLE)
    pm_final = BQField('pm_final', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    pm_restored = BQField('pm_restored', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    pm_questionnaire_response_id = BQField('pm_questionnaire_response_id',
                                           BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    pm_collect_type = BQField('pm_collect_type', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    pm_collect_type_id = BQField('pm_collect_type_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    pm_origin = BQField('pm_origin', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    pm_origin_measurement_unit = BQField('pm_origin_measurement_unit', BQFieldTypeEnum.STRING,
                                         BQFieldModeEnum.NULLABLE)
    pm_origin_measurement_unit_id = BQField('pm_origin_measurement_unit_id', BQFieldTypeEnum.INTEGER,
                                            BQFieldModeEnum.NULLABLE)
# TODO: Deprecate use of this class and add these fields to the BQBiobankOrderSchema
class BQPDRBiospecimenSchema(BQSchema):
    """
    PDR Summary of Biobank Orders and Tests

    Nested (repeated) record schema summarizing a participant's biobank
    orders. Paired *_id fields hold the enum integer value for the
    corresponding string field.
    """
    biosp_status = BQField('biosp_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    biosp_status_id = BQField('biosp_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    biosp_order_time = BQField('biosp_order_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    biosp_isolate_dna = BQField('biosp_isolate_dna', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    biosp_isolate_dna_confirmed = BQField('biosp_isolate_dna_confirmed',
                                          BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    biosp_baseline_tests = BQField('biosp_baseline_tests', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    biosp_baseline_tests_confirmed = BQField('biosp_baseline_tests_confirmed',
                                             BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQPDREhrReceiptSchema(BQSchema):
    """
    PDR Participant EHR Receipt Histories

    Nested (repeated) record schema: one entry per EHR receipt event
    observed for the participant.
    """
    file_timestamp = BQField('file_timestamp', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
    first_seen = BQField('first_seen', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    last_seen = BQField('last_seen', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    participant_ehr_receipt_id = BQField('participant_ehr_receipt_id', BQFieldTypeEnum.INTEGER,
                                         BQFieldModeEnum.NULLABLE)
class BQPDRParticipantSummarySchema(BQSchema):
    """
    A subset of participant summary for PDR that does not contain PII.
    Note: !!! If you add fields here, remember to add them to the View as well. !!!
    Note: Do not use camelCase for property names. Property names must exactly match BQ
      field names.
    """
    # Record identity / bookkeeping.
    id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
    created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
    modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
    participant_id = BQField('participant_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
    participant_origin = BQField('participant_origin', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    addr_state = BQField('addr_state', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    addr_zip = BQField('addr_zip', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    is_ghost_id = BQField('is_ghost_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
    sign_up_time = BQField('sign_up_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    # Enrollment status and milestone timestamps (legacy fields).
    enrollment_status = BQField('enrollment_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    enrollment_status_id = BQField('enrollment_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    enrollment_member = BQField('enrollment_member', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_core_ordered = BQField('enrollment_core_ordered', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_core_stored = BQField('enrollment_core_stored', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    # PDR-106: The EHR fields are needed in PDR by PTSC and for consistency should come from RDR vs. Curation BigQuery
    ehr_status = BQField('ehr_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    ehr_status_id = BQField('ehr_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ehr_receipt = BQField('ehr_receipt', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    ehr_update = BQField('ehr_update', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    # Withdrawal details.
    withdrawal_status = BQField('withdrawal_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    withdrawal_status_id = BQField('withdrawal_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    withdrawal_time = BQField('withdrawal_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    withdrawal_authored = BQField('withdrawal_authored', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    withdrawal_reason = BQField('withdrawal_reason', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    withdrawal_reason_id = BQField('withdrawal_reason_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    withdrawal_reason_justification = BQField('withdrawal_reason_justification', BQFieldTypeEnum.STRING,
                                              BQFieldModeEnum.NULLABLE)
    # Pairing (awardee / organization / site).
    hpo = BQField('hpo', BQFieldTypeEnum.STRING, BQFieldModeEnum.REQUIRED)
    hpo_id = BQField('hpo_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
    organization = BQField('organization', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    organization_id = BQField('organization_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    site = BQField('site', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    site_id = BQField('site_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    # Demographics.
    date_of_birth = BQField('date_of_birth', BQFieldTypeEnum.DATE, BQFieldModeEnum.NULLABLE)
    primary_language = BQField('primary_language', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    education = BQField('education', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    education_id = BQField('education_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    income = BQField('income', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    income_id = BQField('income_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    sex = BQField('sex', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    sex_id = BQField('sex_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    # Nested (repeated) sub-records; each view below unnests one of these.
    pm = BQRecordField('pm', schema=BQPDRPhysicalMeasurements)
    races = BQRecordField('races', schema=BQRaceSchema)
    genders = BQRecordField('genders', schema=BQGenderSchema)
    sexual_orientations = BQRecordField('sexual_orientations', schema=BQSexualOrientationSchema)
    modules = BQRecordField('modules', schema=BQModuleStatusSchema)
    consents = BQRecordField('consents', schema=BQConsentSchema)
    biospec = BQRecordField('biospec', schema=BQPDRBiospecimenSchema)
    # UBR (underrepresented in biomedical research) flags.
    ubr_sex = BQField('ubr_sex', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_sexual_orientation = BQField('ubr_sexual_orientation', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_gender_identity = BQField('ubr_gender_identity', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_ethnicity = BQField('ubr_ethnicity', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_geography = BQField('ubr_geography', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_education = BQField('ubr_education', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_income = BQField('ubr_income', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_sexual_gender_minority = BQField('ubr_sexual_gender_minority', BQFieldTypeEnum.INTEGER,
                                         BQFieldModeEnum.NULLABLE)
    ubr_overall = BQField('ubr_overall', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_age_at_consent = BQField('ubr_age_at_consent', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    consent_cohort = BQField('consent_cohort', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    consent_cohort_id = BQField('consent_cohort_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    # Contact availability flags (no PII, just presence indicators).
    email_available = BQField('email_available', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    phone_number_available = BQField('phone_number_available', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    ubr_disability = BQField('ubr_disability', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    patient_statuses = BQRecordField('patient_statuses', schema=BQPatientStatusSchema)
    test_participant = BQField('test_participant', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    suspension_status = BQField('suspension_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    suspension_status_id = BQField('suspension_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    suspension_time = BQField('suspension_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    cohort_2_pilot_flag = BQField('cohort_2_pilot_flag', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    cohort_2_pilot_flag_id = BQField('cohort_2_pilot_flag_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    biobank_orders = BQRecordField('biobank_orders', schema=BQBiobankOrderSchema)
    # PDR-166: Additional EHR status / history information enabled by DA-1781
    is_ehr_data_available = BQField('is_ehr_data_available', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    was_ehr_data_available = BQField('was_ehr_data_available', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    first_ehr_receipt_time = BQField('first_ehr_receipt_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    latest_ehr_receipt_time = BQField('latest_ehr_receipt_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    ehr_receipts = BQRecordField('ehr_receipts', schema=BQPDREhrReceiptSchema)
    # PDR-176: Participant deceased status info
    deceased_authored = BQField('deceased_authored', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    deceased_status = BQField('deceased_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    deceased_status_id = BQField('deceased_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    # PDR-178: CABoR details. This is part of ConsentPII, but for various reasons the easiest way to align with
    # RDR CABoR tracking is to surface the appropriate authored date here. Presence of a date (vs. null/None also
    # acts as the true/false flag equivalent to RDR participant_summary.consent_for_cabor field
    cabor_authored = BQField('cabor_authored', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    biobank_id = BQField('biobank_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    # PDR-236: Support for new RDR participant_summary.enrollment_core_minus_pm_time field in PDR data
    enrollment_core_minus_pm = BQField('enrollment_core_minus_pm', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    # PDR-252: Need to provide AIAN withdrawal ceremony status
    withdrawal_aian_ceremony_status = \
        BQField('withdrawal_aian_ceremony_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    withdrawal_aian_ceremony_status_id = \
        BQField('withdrawal_aian_ceremony_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    # TODO: Exclude date of death initially in case it constitutes PII.  Add to end of field list if it is
    #  enabled later
    # date_of_death = BQField('date_of_death', BQFieldTypeEnum.DATE, BQFieldModeEnum.NULLABLE)
    # Enrollment milestone timestamps.
    enrl_registered_time = BQField('enrl_registered_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrl_participant_time = BQField('enrl_participant_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrl_participant_plus_ehr_time = BQField('enrl_participant_plus_ehr_time', BQFieldTypeEnum.DATETIME,
                                             BQFieldModeEnum.NULLABLE)
    enrl_core_participant_minus_pm_time = BQField('enrl_core_participant_minus_pm_time', BQFieldTypeEnum.DATETIME,
                                                  BQFieldModeEnum.NULLABLE)
    enrl_core_participant_time = BQField('enrl_core_participant_time', BQFieldTypeEnum.DATETIME,
                                         BQFieldModeEnum.NULLABLE)
    pairing_history = BQRecordField('pairing_history', schema=BQPairingHistorySchema)
    enrl_status = BQField('enrl_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    enrl_status_id = BQField('enrl_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    age_at_consent = BQField('age_at_consent', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    research_id = BQField('research_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    # New Goal 1 additions, ingested from RDR
    enrollment_status_legacy_v2 = BQField('enrollment_status_legacy_v2', BQFieldTypeEnum.STRING,
                                          BQFieldModeEnum.NULLABLE)
    enrollment_status_legacy_v2_id = BQField('enrollment_status_legacy_v2_id', BQFieldTypeEnum.INTEGER,
                                             BQFieldModeEnum.NULLABLE)
    # Version 3.0 enrollment status and milestone timestamps.
    enrollment_status_v3_0 = BQField('enrollment_status_v3_0', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_0_id = BQField('enrollment_status_v3_0_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_0_participant_time = BQField('enrollment_status_v3_0_participant_time',
                                                      BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_0_participant_plus_ehr_time = BQField('enrollment_status_v3_0_participant_plus_ehr_time',
                                                               BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_0_pmb_eligible_time = BQField('enrollment_status_v3_0_pmb_eligible_time',
                                                       BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_0_core_minus_pm_time = BQField('enrollment_status_v3_0_core_minus_pm_time',
                                                        BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_0_core_time = BQField('enrollment_status_v3_0_core_time', BQFieldTypeEnum.DATETIME,
                                               BQFieldModeEnum.NULLABLE)
    # Version 3.1 enrollment status and milestone timestamps.
    enrollment_status_v3_1 = BQField('enrollment_status_v3_1', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_1_id = BQField('enrollment_status_v3_1_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_1_participant_time = BQField('enrollment_status_v3_1_participant_time',
                                                      BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_1_participant_plus_ehr_time = BQField('enrollment_status_v3_1_participant_plus_ehr_time',
                                                               BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_1_participant_plus_basics_time = BQField('enrollment_status_v3_1_participant_plus_basics_time',
                                                                  BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_1_core_minus_pm_time = BQField('enrollment_status_v3_1_core_minus_pm_time',
                                                        BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_1_core_time = BQField('enrollment_status_v3_1_core_time', BQFieldTypeEnum.DATETIME,
                                               BQFieldModeEnum.NULLABLE)
    enrollment_status_v3_1_participant_plus_baseline_time = \
        BQField('enrollment_status_v3_1_participant_plus_baseline_time', BQFieldTypeEnum.DATETIME,
                BQFieldModeEnum.NULLABLE)
    health_datastream_sharing_status_v3_1 = BQField('health_datastream_sharing_status_v3_1',
                                                    BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
    health_datastream_sharing_status_v3_1_id = BQField('health_datastream_sharing_status_v3_1_id',
                                                       BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQPDRParticipantSummary(BQTable):
    """ PDR Participant Summary BigQuery Table """
    __tablename__ = 'pdr_participant'
    __schema__ = BQPDRParticipantSummarySchema
class BQPDRParticipantSummaryView(BQView):
    """ PDR Team view of the Participant Summary """
    __viewname__ = 'v_pdr_participant'
    __viewdescr__ = 'PDR Participant Summary View'
    __table__ = BQPDRParticipantSummary
    __pk_id__ = 'participant_id'
    # We need to build a SQL statement with all fields except sub-tables and remove duplicates.
    # The ROW_NUMBER()/rn = 1 filter keeps only the most recent row per participant; the WHERE
    # clause also drops test participants and withdrawn ones (withdrawal_status_id = 1 appears
    # to be the not-withdrawn value -- confirm against WithdrawalStatus enum).
    __sql__ = """
    SELECT
      %%FIELD_NAMES%%
    FROM (
      SELECT *,
          ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
        FROM `{project}`.{dataset}.pdr_participant
    ) ps
    WHERE ps.rn = 1 and ps.withdrawal_status_id = 1 and ps.test_participant != 1
  """.replace('%%FIELD_NAMES%%', BQPDRParticipantSummarySchema.get_sql_field_names(
        # Repeated (nested) record fields are excluded here; each has its own unnested view below.
        exclude_fields=[
            'pm',
            'genders',
            'races',
            'sexual_orientations',
            'modules',
            'consents',
            'biospec',
            'patient_statuses',
            'biobank_orders',
            'ehr_receipts',
            'pairing_history'
        ])
    )
class BQPDRParticipantSummaryAllView(BQPDRParticipantSummaryView):
    """ Like BQPDRParticipantSummaryView, but without the withdrawal/test participant filters. """
    __viewname__ = 'v_pdr_participant_all'
    __viewdescr__ = 'PDR Participant Summary All View'
    # Only rn = 1 is filtered: every participant's latest row is included.
    __sql__ = """
    SELECT
      %%FIELD_NAMES%%
    FROM (
      SELECT *,
          ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
        FROM `{project}`.{dataset}.pdr_participant
    ) ps
    WHERE ps.rn = 1
  """.replace('%%FIELD_NAMES%%', BQPDRParticipantSummarySchema.get_sql_field_names(
        # Repeated (nested) record fields are excluded; they are exposed via the unnested views.
        exclude_fields=[
            'pm',
            'genders',
            'races',
            'sexual_orientations',
            'modules',
            'consents',
            'biospec',
            'patient_statuses',
            'biobank_orders',
            'ehr_receipts',
            'pairing_history'
        ])
    )
# TODO: This is now a custom view in PDR BigQuery (as of PDR-262). Needs to be disabled here so it will not be
# updated by migrate-bq tool. Consider moving all custom views into our model? Some (like this one) will have
# extremely complicated SQL definitions, so unclear if that is a viable/best solution
# class BQPDRParticipantSummaryWithdrawnView(BQView):
# __viewname__ = 'v_pdr_participant_withdrawn'
# __viewdescr__ = 'PDR Participant Summary Withdrawn View'
# __table__ = BQPDRParticipantSummary
# __sql__ = BQPDRParticipantSummaryView.__sql__.replace('ps.withdrawal_status_id = 1',
# 'ps.withdrawal_status_id != 1')
class BQPDRPMView(BQView):
    """ Unnests the repeated `pm` record: one row per physical measurements entry. """
    __viewname__ = 'v_pdr_participant_pm'
    __viewdescr__ = 'PDR Physical Measurements View'
    __table__ = BQPDRParticipantSummary
    # rn = 1 keeps only the latest participant row before unnesting; test participants excluded.
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(pm) as nt
      WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRGenderView(BQView):
    """ Unnests the repeated `genders` record: one row per gender answer. """
    __viewname__ = 'v_pdr_participant_gender'
    __viewdescr__ = 'PDR Participant Gender View'
    __table__ = BQPDRParticipantSummary
    __pk_id__ = 'participant_id'
    # rn = 1 keeps only the latest participant row before unnesting; test participants excluded.
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(genders) as nt
      WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRRaceView(BQView):
    """ Unnests the repeated `races` record: one row per race answer. """
    __viewname__ = 'v_pdr_participant_race'
    __viewdescr__ = 'PDR Participant Race View'
    __table__ = BQPDRParticipantSummary
    # rn = 1 keeps only the latest participant row before unnesting; test participants excluded.
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(races) as nt
      WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRSexualOrientationView(BQView):
    """ Unnests the repeated `sexual_orientations` record: one row per answer. """
    __viewname__ = 'v_pdr_participant_sexual_orientation'
    __viewdescr__ = 'PDR Participant Sexual Orientation View'
    __table__ = BQPDRParticipantSummary
    # rn = 1 keeps only the latest participant row before unnesting; test participants excluded.
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(sexual_orientations) as nt
      WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRModuleView(BQView):
    """BigQuery view flattening the ``modules`` (survey module) array.

    Unlike the sibling views this one enumerates its output columns
    explicitly, casting the authored/created timestamps to DATETIME.
    """
    __viewname__ = 'v_pdr_participant_module'
    __viewdescr__ = 'PDR Participant Survey Module View'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id,
        nt.mod_module, nt.mod_baseline_module,
        CAST(nt.mod_authored AS DATETIME) as mod_authored, CAST(nt.mod_created AS DATETIME) as mod_created,
        nt.mod_language, nt.mod_status, nt.mod_status_id, nt.mod_response_status, nt.mod_response_status_id,
        nt.mod_external_id, nt.mod_questionnaire_response_id, nt.mod_consent, nt.mod_consent_value,
        nt.mod_consent_value_id, nt.mod_consent_expired, nt.mod_non_participant_answer,
        nt.mod_classification_type, nt.mod_classification_type_id
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(modules) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRConsentView(BQView):
    """BigQuery view flattening the ``consents`` array for the latest record
    of each non-test participant (one row per consent entry)."""
    __viewname__ = 'v_pdr_participant_consent'
    __viewdescr__ = 'PDR Participant Consent View'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(consents) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRBioSpecView(BQView):
    """BigQuery view flattening the ``biospec`` array for the latest record
    of each non-test participant (one row per biospecimen entry)."""
    __viewname__ = 'v_pdr_biospec'
    __viewdescr__ = 'PDR Participant BioBank Order View'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(biospec) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
# NOTE(review): class name misspells "Statuses"; kept as-is because renaming
# would break any module that imports this class by name.
class BQPDRPatientStatuesView(BQView):
    """BigQuery view flattening the ``patient_statuses`` array for the latest
    record of each non-test participant."""
    __viewname__ = 'v_pdr_participant_patient_status'
    __viewdescr__ = 'PDR Participant Patient Status View'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(patient_statuses) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRParticipantBiobankOrderView(BQView):
    """BigQuery view flattening the ``biobank_orders`` array, exposing an
    explicit set of order-level (``bbo_*``) columns for the latest record
    of each non-test participant."""
    __viewname__ = 'v_pdr_participant_biobank_order'
    __viewdescr__ = 'PDR Participant Biobank Order Details view'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id,
        nt.bbo_biobank_order_id,
        nt.bbo_created,
        nt.bbo_status,
        nt.bbo_status_id,
        nt.bbo_collected_site,
        nt.bbo_collected_site_id,
        nt.bbo_processed_site,
        nt.bbo_processed_site_id,
        nt.bbo_finalized_site,
        nt.bbo_finalized_site_id,
        nt.bbo_finalized_time,
        nt.bbo_finalized_status,
        nt.bbo_finalized_status_id,
        nt.bbo_tests_ordered,
        nt.bbo_tests_stored,
        nt.bbo_collection_method,
        nt.bbo_collection_method_id,
        nt.bbo_id
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(biobank_orders) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRParticipantBiobankSampleView(BQView):
    """BigQuery view over biobank samples.

    Double-unnests: each latest non-test participant row is expanded over
    ``biobank_orders`` (aliased ``bbo``) and then over each order's
    ``bbo_samples`` (aliased ``nt``), so the output is one row per sample
    with its parent order id attached.
    """
    __viewname__ = 'v_pdr_participant_biobank_sample'
    __viewdescr__ = 'PDR Participant Biobank Sample Details view'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id,
        bbo.bbo_biobank_order_id,
        nt.bbs_test,
        nt.bbs_baseline_test,
        nt.bbs_dna_test,
        nt.bbs_collected,
        nt.bbs_processed,
        nt.bbs_finalized,
        nt.bbs_created,
        nt.bbs_confirmed,
        nt.bbs_status,
        nt.bbs_status_id,
        nt.bbs_disposed,
        nt.bbs_disposed_reason,
        nt.bbs_disposed_reason_id,
        nt.bbs_biobank_stored_sample_id,
        nt.bbs_id,
        nt.bbs_hash_id
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(biobank_orders) as bbo, unnest(bbo.bbo_samples) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDREhrReceiptView(BQView):
    """BigQuery view flattening the ``ehr_receipts`` array for the latest
    record of each non-test participant."""
    __viewname__ = 'v_pdr_participant_ehr_receipt'
    __viewdescr__ = 'PDR Participant EHR Receipts View'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(ehr_receipts) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
class BQPDRPairingHistoryView(BQView):
    """BigQuery view flattening the ``pairing_history`` array for the latest
    record of each non-test participant."""
    __viewname__ = 'v_pdr_participant_pairing_history'
    __viewdescr__ = 'PDR Participant Pairing History View'
    __table__ = BQPDRParticipantSummary
    __sql__ = """
    SELECT ps.id, ps.created, ps.modified, ps.participant_id, nt.*
      FROM (
        SELECT *,
            ROW_NUMBER() OVER (PARTITION BY participant_id ORDER BY modified desc, test_participant desc) AS rn
          FROM `{project}`.{dataset}.pdr_participant
      ) ps cross join unnest(pairing_history) as nt
    WHERE ps.rn = 1 and ps.test_participant != 1
  """
| bsd-3-clause | 48576ce471d5c846ccd51f5a1e2d68a6 | 52.471869 | 120 | 0.674948 | 3.343129 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/server.py | 1 | 10865 | # -*- coding: utf-8 -*-
import json
import logging
import requests
# The original try/except was a stale Python 2/3 compatibility shim: BOTH
# branches imported the Python 3 ``urllib.parse`` module (the "Python 2.x"
# comment no longer matched the code), so the fallback could never take a
# different path.  The module is only ever referenced as ``urllib.parse``
# in this file, so a plain import is sufficient.
import urllib.parse
from .auth import FHIRAuth
# MIME type for FHIR resources encoded as JSON; used below as the default
# ``Accept`` / ``Content-type`` header value on FHIR requests.
FHIRJSONMimeType = 'application/fhir+json'
# Module-level logger for this package.
logger = logging.getLogger(__name__)
class FHIRUnauthorizedException(Exception):
    """ Indicating a 401 response.

    Raised by `FHIRServer.raise_for_status` when the server answers with
    HTTP 401 (Unauthorized).
    """

    def __init__(self, response):
        # The full `requests` response object, kept so callers can inspect
        # the failed request's status, headers and body.
        self.response = response
class FHIRPermissionDeniedException(Exception):
    """ Indicating a 403 response.

    Raised by `FHIRServer.raise_for_status` when the server answers with
    HTTP 403 (Forbidden).
    """

    def __init__(self, response):
        # The full `requests` response object for the rejected request.
        self.response = response
class FHIRNotFoundException(Exception):
    """ Indicating a 404 response.

    Raised by `FHIRServer.raise_for_status` when the server answers with
    HTTP 404 (Not Found).
    """

    def __init__(self, response):
        # The full `requests` response object for the missing resource.
        self.response = response
class FHIRServer(object):
    """ Handles talking to a FHIR server.

    Wraps a single `requests.Session` for all HTTP traffic and, once the
    server's CapabilityStatement has been fetched, an optional `FHIRAuth`
    instance used to sign outgoing requests.

    Review fixes applied to this class:
    - mutable ``{}`` default arguments replaced with ``None``
    - caller-supplied ``headers`` are now merged into (instead of silently
      discarded by) ``_get``/``request_data``
    - ``delete_json`` and ``post_as_form`` now actually send the headers
      they build (previously built and then dropped)
    """

    def __init__(self, client, base_uri=None, state=None):
        """ Initializer.

        :param client: The owning FHIR client instance (may be None)
        :param str base_uri: The server's base URI; a trailing "/" is
            appended if missing so urljoin keeps the last path component
        :param dict state: Optional previously-saved state to restore
        :raises: Exception if neither `base_uri` nor `state` yields a base URI
        """
        self.client = client
        self.auth = None
        self.base_uri = None

        # Use a single requests Session for all "requests"
        self.session = requests.Session()

        # A URI can't possibly be less than 11 chars
        # make sure we end with "/", otherwise the last path component will be
        # lost when creating URLs with urllib
        if base_uri is not None and len(base_uri) > 10:
            self.base_uri = base_uri if '/' == base_uri[-1] else base_uri + '/'
        self._capability = None
        if state is not None:
            self.from_state(state)
        if not self.base_uri or len(self.base_uri) <= 10:
            raise Exception("FHIRServer must be initialized with `base_uri` or `state` containing the base-URI, but neither happened")

    def should_save_state(self):
        """ Ask the owning client (if any) to persist its state. """
        if self.client is not None:
            self.client.save_state()

    # MARK: Server CapabilityStatement

    @property
    def capabilityStatement(self):
        """ The server's CapabilityStatement, fetched on first access. """
        self.get_capability()
        return self._capability

    def get_capability(self, force=False):
        """ Returns the server's CapabilityStatement, retrieving it if needed
        or forced.

        Also creates the `auth` instance from the statement's REST security
        section (if any) and asks the client to save state.

        :param bool force: Re-fetch even if a statement is already cached
        """
        if self._capability is None or force:
            logger.info('Fetching CapabilityStatement from {0}'.format(self.base_uri))
            # Imported lazily to avoid a circular import with the models package.
            from .models import capabilitystatement
            conf = capabilitystatement.CapabilityStatement.read_from('metadata', self)
            self._capability = conf

            security = None
            try:
                security = conf.rest[0].security
            except Exception:
                logger.info("No REST security statement found in server capability statement")
            settings = {
                'aud': self.base_uri,
                'app_id': self.client.app_id if self.client is not None else None,
                'app_secret': self.client.app_secret if self.client is not None else None,
                'redirect_uri': self.client.redirect if self.client is not None else None,
            }
            self.auth = FHIRAuth.from_capability_security(security, settings)
            self.should_save_state()

    # MARK: Authorization

    @property
    def desired_scope(self):
        """ The OAuth2 scope the client wants, or None without a client. """
        return self.client.desired_scope if self.client is not None else None

    @property
    def launch_token(self):
        """ The SMART launch token, or None without a client. """
        return self.client.launch_token if self.client is not None else None

    @property
    def authorize_uri(self):
        """ The URI to send the user to for authorization; fetches the
        capability statement first if needed. """
        if self.auth is None:
            self.get_capability()
        return self.auth.authorize_uri(self)

    def handle_callback(self, url):
        """ Hand the OAuth2 redirect/callback URL to the auth instance.

        :raises: Exception if no auth instance has been set up yet
        """
        if self.auth is None:
            raise Exception("Not ready to handle callback, I do not have an auth instance")
        return self.auth.handle_callback(url, self)

    def reauthorize(self):
        """ Ask the auth instance to refresh/re-run authorization.

        :raises: Exception if no auth instance has been set up yet
        """
        if self.auth is None:
            raise Exception("Not ready to reauthorize, I do not have an auth instance")
        return self.auth.reauthorize(self) if self.auth is not None else None

    # MARK: Requests

    @property
    def ready(self):
        """ Check whether the server is ready to make calls, i.e. is has
        fetched its capability statement and its `auth` instance is ready.

        :returns: True if the server can make authenticated calls
        """
        return self.auth.ready if self.auth is not None else False

    def prepare(self):
        """ Check whether the server is ready to make calls, i.e. is has
        fetched its capability statement and its `auth` instance is ready.
        This method will fetch the capability statement if it hasn't already
        been fetched.

        :returns: True if the server can make authenticated calls
        """
        if self.auth is None:
            self.get_capability()
        return self.auth.ready if self.auth is not None else False

    def request_json(self, path, nosign=False):
        """ Perform a request for JSON data against the server's base with the
        given relative path.

        :param str path: The path to append to `base_uri`
        :param bool nosign: If set to True, the request will not be signed
        :throws: Exception on HTTP status >= 400
        :returns: Decoded JSON response
        """
        headers = {'Accept': 'application/json'}
        res = self._get(path, headers, nosign)
        return res.json()

    def request_data(self, path, headers=None, nosign=False):
        """ Perform a data request data against the server's base with the
        given relative path.

        :param str path: The path to append to `base_uri`
        :param dict headers: Additional headers to send (bug fix: these were
            previously accepted but silently ignored)
        :param bool nosign: If set to True, the request will not be signed
        :returns: The raw response body as bytes
        """
        res = self._get(path, headers, nosign)
        return res.content

    def _get(self, path, headers=None, nosign=False):
        """ Issues a GET request.

        :param str path: The path to append to `base_uri`
        :param dict headers: Extra headers, merged over (and allowed to
            override) the default FHIR JSON Accept headers.  Bug fix: the
            original built its own dict and discarded this argument, and
            used a shared mutable ``{}`` default.
        :param bool nosign: If set to True, the request will not be signed
        :returns: The response object
        """
        assert self.base_uri and path
        url = urllib.parse.urljoin(self.base_uri, path)

        header_defaults = {
            'Accept': FHIRJSONMimeType,
            'Accept-Charset': 'UTF-8',
        }
        if headers is not None:
            header_defaults.update(headers)
        headers = header_defaults
        if not nosign and self.auth is not None and self.auth.can_sign_headers():
            headers = self.auth.signed_headers(headers)

        # perform the request but intercept 401 responses, raising our own Exception
        res = self.session.get(url, headers=headers)
        self.raise_for_status(res)
        return res

    def put_json(self, path, resource_json, nosign=False):
        """ Performs a PUT request of the given JSON, which should represent a
        resource, to the given relative path.

        :param str path: The path to append to `base_uri`
        :param dict resource_json: The JSON representing the resource
        :param bool nosign: If set to True, the request will not be signed
        :throws: Exception on HTTP status >= 400
        :returns: The response object
        """
        url = urllib.parse.urljoin(self.base_uri, path)
        headers = {
            'Content-type': FHIRJSONMimeType,
            'Accept': FHIRJSONMimeType,
            'Accept-Charset': 'UTF-8',
        }
        if not nosign and self.auth is not None and self.auth.can_sign_headers():
            headers = self.auth.signed_headers(headers)

        # perform the request but intercept 401 responses, raising our own Exception
        res = self.session.put(url, headers=headers, data=json.dumps(resource_json))
        self.raise_for_status(res)
        return res

    def post_json(self, path, resource_json, nosign=False):
        """ Performs a POST of the given JSON, which should represent a
        resource, to the given relative path.

        :param str path: The path to append to `base_uri`
        :param dict resource_json: The JSON representing the resource
        :param bool nosign: If set to True, the request will not be signed
        :throws: Exception on HTTP status >= 400
        :returns: The response object
        """
        url = urllib.parse.urljoin(self.base_uri, path)
        headers = {
            'Content-type': FHIRJSONMimeType,
            'Accept': FHIRJSONMimeType,
            'Accept-Charset': 'UTF-8',
        }
        if not nosign and self.auth is not None and self.auth.can_sign_headers():
            headers = self.auth.signed_headers(headers)

        # perform the request but intercept 401 responses, raising our own Exception
        res = self.session.post(url, headers=headers, data=json.dumps(resource_json))
        self.raise_for_status(res)
        return res

    def post_as_form(self, url, formdata, auth=None):
        """ Performs a POST request with form-data, expecting to receive JSON.
        This method is used in the OAuth2 token exchange and thus doesn't
        request fhir+json.

        :throws: Exception on HTTP status >= 400
        :returns: The response object
        """
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
            'Accept': 'application/json',
        }
        # Bug fix: the headers above were previously built but never passed
        # to the request.
        res = self.session.post(url, headers=headers, data=formdata, auth=auth)
        self.raise_for_status(res)
        return res

    def delete_json(self, path, nosign=False):
        """ Issues a DELETE command against the given relative path, accepting
        a JSON response.

        :param str path: The relative URL path to issue a DELETE against
        :param bool nosign: If set to True, the request will not be signed
        :returns: The response object
        """
        url = urllib.parse.urljoin(self.base_uri, path)
        headers = {
            'Accept': FHIRJSONMimeType,
            'Accept-Charset': 'UTF-8',
        }
        if not nosign and self.auth is not None and self.auth.can_sign_headers():
            headers = self.auth.signed_headers(headers)

        # perform the request but intercept 401 responses, raising our own Exception
        # Bug fix: the (signed) headers above were previously built but never
        # sent with the DELETE request.
        res = self.session.delete(url, headers=headers)
        self.raise_for_status(res)
        return res

    def raise_for_status(self, response):
        """ Raise a specific exception for 401/403/404 responses, defer to
        `requests` for other errors, and return None on success (< 400). """
        if response.status_code < 400:
            return

        if 401 == response.status_code:
            raise FHIRUnauthorizedException(response)
        elif 403 == response.status_code:
            raise FHIRPermissionDeniedException(response)
        elif 404 == response.status_code:
            raise FHIRNotFoundException(response)
        else:
            response.raise_for_status()

    # MARK: State Handling

    @property
    def state(self):
        """ Return current state.

        :returns: A dict with `base_uri`, `auth_type` and the auth state
        """
        return {
            'base_uri': self.base_uri,
            'auth_type': self.auth.auth_type if self.auth is not None else 'none',
            'auth': self.auth.state if self.auth is not None else None,
        }

    def from_state(self, state):
        """ Update ivars from given state information.

        :param dict state: State previously produced by the `state` property
        """
        assert state
        self.base_uri = state.get('base_uri') or self.base_uri
        self.auth = FHIRAuth.create(state.get('auth_type'), state=state.get('auth'))
| bsd-3-clause | 71e14bd2680c52496f64fd6cd782aa42 | 34.048387 | 134 | 0.615647 | 4.221057 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/medicationdispense.py | 1 | 12359 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MedicationDispense) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
# Generated FHIR R4 model — field list mirrors the MedicationDispense
# StructureDefinition; avoid hand-editing logic that the generator owns.
class MedicationDispense(domainresource.DomainResource):
    """ Dispensing a medication to a named patient.

    Indicates that a medication product is to be or has been dispensed for a
    named person/patient.  This includes a description of the medication
    product (supply) provided and the instructions for administering the
    medication.  The medication dispense is the result of a pharmacy system
    responding to a medication order.
    """

    resource_type = "MedicationDispense"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.authorizingPrescription = None
        """ Medication order that authorizes the dispense.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.category = None
        """ Type of medication dispense.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.context = None
        """ Encounter / Episode associated with event.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.daysSupply = None
        """ Amount of medication expressed as a timing amount.
        Type `Quantity` (represented as `dict` in JSON). """

        self.destination = None
        """ Where the medication was sent.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.detectedIssue = None
        """ Clinical issue with action.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.dosageInstruction = None
        """ How the medication is to be used by the patient or administered by
        the caregiver.
        List of `Dosage` items (represented as `dict` in JSON). """

        self.eventHistory = None
        """ A list of relevant lifecycle events.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.identifier = None
        """ External identifier.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.location = None
        """ Where the dispense occurred.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.medicationCodeableConcept = None
        """ What medication was supplied.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.medicationReference = None
        """ What medication was supplied.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.note = None
        """ Information about the dispense.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.partOf = None
        """ Event that dispense is part of.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.performer = None
        """ Who performed event.
        List of `MedicationDispensePerformer` items (represented as `dict` in JSON). """

        self.quantity = None
        """ Amount dispensed.
        Type `Quantity` (represented as `dict` in JSON). """

        self.receiver = None
        """ Who collected the medication.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.status = None
        """ preparation | in-progress | cancelled | on-hold | completed |
        entered-in-error | stopped | unknown.
        Type `str`. """

        self.statusReasonCodeableConcept = None
        """ Why a dispense was not performed.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.statusReasonReference = None
        """ Why a dispense was not performed.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.subject = None
        """ Who the dispense is for.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.substitution = None
        """ Whether a substitution was performed on the dispense.
        Type `MedicationDispenseSubstitution` (represented as `dict` in JSON). """

        self.supportingInformation = None
        """ Information that supports the dispensing of the medication.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.type = None
        """ Trial fill, partial fill, emergency fill, etc..
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.whenHandedOver = None
        """ When product was given out.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.whenPrepared = None
        """ When product was packaged and reviewed.
        Type `FHIRDate` (represented as `str` in JSON). """

        super(MedicationDispense, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Return the element property descriptors for this resource.

        Each tuple appears to follow the fhirclient convention
        (name, json_name, type, is_list, of_many, not_optional) — note the
        "medication" / "statusReason" of_many groups for choice-of-type
        elements.  TODO(review): confirm against the fhirclient base class.
        """
        js = super(MedicationDispense, self).elementProperties()
        js.extend([
            ("authorizingPrescription", "authorizingPrescription", fhirreference.FHIRReference, True, None, False),
            ("category", "category", codeableconcept.CodeableConcept, False, None, False),
            ("context", "context", fhirreference.FHIRReference, False, None, False),
            ("daysSupply", "daysSupply", quantity.Quantity, False, None, False),
            ("destination", "destination", fhirreference.FHIRReference, False, None, False),
            ("detectedIssue", "detectedIssue", fhirreference.FHIRReference, True, None, False),
            ("dosageInstruction", "dosageInstruction", dosage.Dosage, True, None, False),
            ("eventHistory", "eventHistory", fhirreference.FHIRReference, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("location", "location", fhirreference.FHIRReference, False, None, False),
            ("medicationCodeableConcept", "medicationCodeableConcept", codeableconcept.CodeableConcept, False, "medication", True),
            ("medicationReference", "medicationReference", fhirreference.FHIRReference, False, "medication", True),
            ("note", "note", annotation.Annotation, True, None, False),
            ("partOf", "partOf", fhirreference.FHIRReference, True, None, False),
            ("performer", "performer", MedicationDispensePerformer, True, None, False),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("receiver", "receiver", fhirreference.FHIRReference, True, None, False),
            ("status", "status", str, False, None, True),
            ("statusReasonCodeableConcept", "statusReasonCodeableConcept", codeableconcept.CodeableConcept, False, "statusReason", False),
            ("statusReasonReference", "statusReasonReference", fhirreference.FHIRReference, False, "statusReason", False),
            ("subject", "subject", fhirreference.FHIRReference, False, None, False),
            ("substitution", "substitution", MedicationDispenseSubstitution, False, None, False),
            ("supportingInformation", "supportingInformation", fhirreference.FHIRReference, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
            ("whenHandedOver", "whenHandedOver", fhirdate.FHIRDate, False, None, False),
            ("whenPrepared", "whenPrepared", fhirdate.FHIRDate, False, None, False),
        ])
        return js
from . import backboneelement
class MedicationDispensePerformer(backboneelement.BackboneElement):
    """ Who performed event.

    Indicates who or what performed the event.
    """

    resource_type = "MedicationDispensePerformer"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.actor = None
        """ Individual who was performing.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.function = None
        """ Who performed the dispense and what they did.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(MedicationDispensePerformer, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Return the element property descriptors for this backbone element
        (generated; ``actor`` is the only required field). """
        js = super(MedicationDispensePerformer, self).elementProperties()
        js.extend([
            ("actor", "actor", fhirreference.FHIRReference, False, None, True),
            ("function", "function", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
class MedicationDispenseSubstitution(backboneelement.BackboneElement):
    """ Whether a substitution was performed on the dispense.

    Indicates whether or not substitution was made as part of the dispense. In
    some cases, substitution will be expected but does not happen, in other
    cases substitution is not expected but does happen. This block explains
    what substitution did or did not happen and why. If nothing is specified,
    substitution was not done.
    """

    resource_type = "MedicationDispenseSubstitution"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.reason = None
        """ Why was substitution made.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.responsibleParty = None
        """ Who is responsible for the substitution.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.type = None
        """ Code signifying whether a different drug was dispensed from what
        was prescribed.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.wasSubstituted = None
        """ Whether a substitution was or was not performed on the dispense.
        Type `bool`. """

        super(MedicationDispenseSubstitution, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Return the element property descriptors for this backbone element
        (generated; ``wasSubstituted`` is the only required field). """
        js = super(MedicationDispenseSubstitution, self).elementProperties()
        js.extend([
            ("reason", "reason", codeableconcept.CodeableConcept, True, None, False),
            ("responsibleParty", "responsibleParty", fhirreference.FHIRReference, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
            ("wasSubstituted", "wasSubstituted", bool, False, None, True),
        ])
        return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| bsd-3-clause | 4eb5c23117ff23b64d84ff87384a1a77 | 42.213287 | 138 | 0.634436 | 4.321329 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/codesystem_tests.py | 1 | 16116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import codesystem
from .fhirdate import FHIRDate
class CodeSystemTests(unittest.TestCase):
    def instantiate_from(self, filename):
        """ Load `filename` from the FHIR unit-test data directory (env var
        ``FHIR_UNITTEST_DATADIR``, falling back to the current directory),
        assert it is a CodeSystem resource, and return the parsed instance.
        """
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("CodeSystem", js["resourceType"])
            return codesystem.CodeSystem(js)
    def testCodeSystem1(self):
        """ Round-trip codesystem-example-supplement.json: parse, assert the
        expected values, re-serialize to JSON, re-parse and assert again. """
        inst = self.instantiate_from("codesystem-example-supplement.json")
        self.assertIsNotNone(inst, "Must have instantiated a CodeSystem instance")
        self.implCodeSystem1(inst)

        js = inst.as_json()
        self.assertEqual("CodeSystem", js["resourceType"])
        inst2 = codesystem.CodeSystem(js)
        self.implCodeSystem1(inst2)
    def implCodeSystem1(self, inst):
        """ Assertions shared by both parse passes of testCodeSystem1
        (auto-generated; expected values mirror the fixture JSON, including
        the fixture's own "hether" typo). """
        self.assertEqual(inst.concept[0].code, "chol-mmol")
        self.assertEqual(inst.concept[0].property[0].code, "legacy")
        self.assertFalse(inst.concept[0].property[0].valueBoolean)
        self.assertEqual(inst.concept[1].code, "chol-mass")
        self.assertEqual(inst.concept[1].property[0].code, "legacy")
        self.assertTrue(inst.concept[1].property[0].valueBoolean)
        self.assertEqual(inst.concept[2].code, "chol")
        self.assertEqual(inst.concept[2].property[0].code, "legacy")
        self.assertTrue(inst.concept[2].property[0].valueBoolean)
        self.assertEqual(inst.contact[0].name, "FHIR project team")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.content, "supplement")
        self.assertEqual(inst.date.date, FHIRDate("2016-01-28").date)
        self.assertEqual(inst.date.as_json(), "2016-01-28")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-supplement")
        self.assertEqual(inst.name, "CholCodeLegacyStatus")
        self.assertEqual(inst.property[0].code, "legacy")
        self.assertEqual(inst.property[0].description, "hether the test is currently performed")
        self.assertEqual(inst.property[0].type, "boolean")
        self.assertEqual(inst.publisher, "ACME Co")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.supplements, "http://hl7.org/fhir/CodeSystem/example")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/CodeSystem/example-supplement")
        self.assertEqual(inst.version, "201801103")
    def testCodeSystem2(self):
        """ Round-trip codesystem-list-example-codes.json: parse, assert the
        expected values, re-serialize to JSON, re-parse and assert again. """
        inst = self.instantiate_from("codesystem-list-example-codes.json")
        self.assertIsNotNone(inst, "Must have instantiated a CodeSystem instance")
        self.implCodeSystem2(inst)

        js = inst.as_json()
        self.assertEqual("CodeSystem", js["resourceType"])
        inst2 = codesystem.CodeSystem(js)
        self.implCodeSystem2(inst2)
    def implCodeSystem2(self, inst):
        """ Assertions shared by both parse passes of testCodeSystem2
        (auto-generated; expected values mirror the fixture JSON). """
        self.assertTrue(inst.caseSensitive)
        self.assertEqual(inst.concept[0].code, "alerts")
        self.assertEqual(inst.concept[0].definition, "A list of alerts for the patient.")
        self.assertEqual(inst.concept[0].display, "Alerts")
        self.assertEqual(inst.concept[1].code, "adverserxns")
        self.assertEqual(inst.concept[1].definition, "A list of part adverse reactions.")
        self.assertEqual(inst.concept[1].display, "Adverse Reactions")
        self.assertEqual(inst.concept[2].code, "allergies")
        self.assertEqual(inst.concept[2].definition, "A list of Allergies for the patient.")
        self.assertEqual(inst.concept[2].display, "Allergies")
        self.assertEqual(inst.concept[3].code, "medications")
        self.assertEqual(inst.concept[3].definition, "A list of medication statements for the patient.")
        self.assertEqual(inst.concept[3].display, "Medication List")
        self.assertEqual(inst.concept[4].code, "problems")
        self.assertEqual(inst.concept[4].definition, "A list of problems that the patient is known of have (or have had in the past).")
        self.assertEqual(inst.concept[4].display, "Problem List")
        self.assertEqual(inst.concept[5].code, "worklist")
        self.assertEqual(inst.concept[5].definition, "A list of items that constitute a set of work to be performed (typically this code would be specialized for more specific uses, such as a ward round list).")
        self.assertEqual(inst.concept[5].display, "Worklist")
        self.assertEqual(inst.concept[6].code, "waiting")
        self.assertEqual(inst.concept[6].definition, "A list of items waiting for an event (perhaps a surgical patient waiting list).")
        self.assertEqual(inst.concept[6].display, "Waiting List")
        self.assertEqual(inst.concept[7].code, "protocols")
        self.assertEqual(inst.concept[7].definition, "A set of protocols to be followed.")
        self.assertEqual(inst.concept[7].display, "Protocols")
        self.assertEqual(inst.concept[8].code, "plans")
        self.assertEqual(inst.concept[8].definition, "A set of care plans that apply in a particular context of care.")
        self.assertEqual(inst.concept[8].display, "Care Plans")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.content, "complete")
        self.assertEqual(inst.description, "Example use codes for the List resource - typical kinds of use.")
        self.assertFalse(inst.experimental)
        self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-wg")
        self.assertEqual(inst.extension[0].valueCode, "fhir")
        self.assertEqual(inst.extension[1].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status")
        self.assertEqual(inst.extension[1].valueCode, "draft")
        self.assertEqual(inst.extension[2].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm")
        self.assertEqual(inst.extension[2].valueInteger, 1)
        self.assertEqual(inst.id, "list-example-codes")
        self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.840.1.113883.4.642.1.1105")
        self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2018-12-27T22:37:54.724+11:00").date)
        self.assertEqual(inst.meta.lastUpdated.as_json(), "2018-12-27T22:37:54.724+11:00")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablecodesystem")
        self.assertEqual(inst.name, "ExampleUseCodesForList")
        self.assertEqual(inst.publisher, "FHIR Project")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "Example Use Codes for List")
        self.assertEqual(inst.url, "http://terminology.hl7.org/CodeSystem/list-example-use-codes")
        self.assertEqual(inst.valueSet, "http://hl7.org/fhir/ValueSet/list-example-codes")
        self.assertEqual(inst.version, "4.0.0")
    def testCodeSystem3(self):
        """ Round-trip codesystem-examplescenario-actor-type.json: parse,
        assert the expected values, re-serialize, re-parse and assert again. """
        inst = self.instantiate_from("codesystem-examplescenario-actor-type.json")
        self.assertIsNotNone(inst, "Must have instantiated a CodeSystem instance")
        self.implCodeSystem3(inst)

        js = inst.as_json()
        self.assertEqual("CodeSystem", js["resourceType"])
        inst2 = codesystem.CodeSystem(js)
        self.implCodeSystem3(inst2)
def implCodeSystem3(self, inst):
self.assertTrue(inst.caseSensitive)
self.assertEqual(inst.concept[0].code, "person")
self.assertEqual(inst.concept[0].definition, "A person.")
self.assertEqual(inst.concept[0].display, "Person")
self.assertEqual(inst.concept[1].code, "entity")
self.assertEqual(inst.concept[1].definition, "A system.")
self.assertEqual(inst.concept[1].display, "System")
self.assertEqual(inst.contact[0].telecom[0].system, "url")
self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
self.assertEqual(inst.contact[0].telecom[1].system, "email")
self.assertEqual(inst.contact[0].telecom[1].value, "fhir@lists.hl7.org")
self.assertEqual(inst.content, "complete")
self.assertEqual(inst.date.date, FHIRDate("2018-12-27T22:37:54+11:00").date)
self.assertEqual(inst.date.as_json(), "2018-12-27T22:37:54+11:00")
self.assertEqual(inst.description, "The type of actor - system or human.")
self.assertFalse(inst.experimental)
self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-wg")
self.assertEqual(inst.extension[0].valueCode, "fhir")
self.assertEqual(inst.extension[1].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status")
self.assertEqual(inst.extension[1].valueCode, "trial-use")
self.assertEqual(inst.extension[2].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm")
self.assertEqual(inst.extension[2].valueInteger, 0)
self.assertEqual(inst.id, "examplescenario-actor-type")
self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.840.1.113883.4.642.1.859")
self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2018-12-27T22:37:54.724+11:00").date)
self.assertEqual(inst.meta.lastUpdated.as_json(), "2018-12-27T22:37:54.724+11:00")
self.assertEqual(inst.name, "ExampleScenarioActorType")
self.assertEqual(inst.publisher, "HL7 (FHIR Project)")
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "ExampleScenarioActorType")
self.assertEqual(inst.url, "http://hl7.org/fhir/examplescenario-actor-type")
self.assertEqual(inst.valueSet, "http://hl7.org/fhir/ValueSet/examplescenario-actor-type")
self.assertEqual(inst.version, "4.0.0")
def testCodeSystem4(self):
inst = self.instantiate_from("codesystem-example-summary.json")
self.assertIsNotNone(inst, "Must have instantiated a CodeSystem instance")
self.implCodeSystem4(inst)
js = inst.as_json()
self.assertEqual("CodeSystem", js["resourceType"])
inst2 = codesystem.CodeSystem(js)
self.implCodeSystem4(inst2)
def implCodeSystem4(self, inst):
self.assertTrue(inst.caseSensitive)
self.assertEqual(inst.contact[0].name, "FHIR project team")
self.assertEqual(inst.contact[0].telecom[0].system, "url")
self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
self.assertEqual(inst.content, "not-present")
self.assertEqual(inst.count, 92)
self.assertEqual(inst.description, "This is an example code system summary for the ACME codes for body site.")
self.assertTrue(inst.experimental)
self.assertEqual(inst.id, "summary")
self.assertEqual(inst.jurisdiction[0].coding[0].code, "CA")
self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
self.assertEqual(inst.name, "Code system summary example for ACME body sites")
self.assertEqual(inst.publisher, "HL7 International")
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.url, "http://hl7.org/fhir/CodeSystem/summary")
self.assertEqual(inst.useContext[0].code.code, "species")
self.assertEqual(inst.useContext[0].code.system, "http://example.org/CodeSystem/contexttype")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].code, "337915000")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].display, "Homo sapiens (organism)")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.version, "4.0.0")
def testCodeSystem5(self):
inst = self.instantiate_from("codesystem-example.json")
self.assertIsNotNone(inst, "Must have instantiated a CodeSystem instance")
self.implCodeSystem5(inst)
js = inst.as_json()
self.assertEqual("CodeSystem", js["resourceType"])
inst2 = codesystem.CodeSystem(js)
self.implCodeSystem5(inst2)
def implCodeSystem5(self, inst):
self.assertTrue(inst.caseSensitive)
self.assertEqual(inst.concept[0].code, "chol-mmol")
self.assertEqual(inst.concept[0].definition, "Serum Cholesterol, in mmol/L")
self.assertEqual(inst.concept[0].designation[0].use.code, "internal-label")
self.assertEqual(inst.concept[0].designation[0].use.system, "http://acme.com/config/fhir/codesystems/internal")
self.assertEqual(inst.concept[0].designation[0].value, "From ACME POC Testing")
self.assertEqual(inst.concept[0].display, "SChol (mmol/L)")
self.assertEqual(inst.concept[1].code, "chol-mass")
self.assertEqual(inst.concept[1].definition, "Serum Cholesterol, in mg/L")
self.assertEqual(inst.concept[1].designation[0].use.code, "internal-label")
self.assertEqual(inst.concept[1].designation[0].use.system, "http://acme.com/config/fhir/codesystems/internal")
self.assertEqual(inst.concept[1].designation[0].value, "From Paragon Labs")
self.assertEqual(inst.concept[1].display, "SChol (mg/L)")
self.assertEqual(inst.concept[2].code, "chol")
self.assertEqual(inst.concept[2].definition, "Serum Cholesterol")
self.assertEqual(inst.concept[2].designation[0].use.code, "internal-label")
self.assertEqual(inst.concept[2].designation[0].use.system, "http://acme.com/config/fhir/codesystems/internal")
self.assertEqual(inst.concept[2].designation[0].value, "Obdurate Labs uses this with both kinds of units...")
self.assertEqual(inst.concept[2].display, "SChol")
self.assertEqual(inst.contact[0].name, "FHIR project team")
self.assertEqual(inst.contact[0].telecom[0].system, "url")
self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
self.assertEqual(inst.content, "complete")
self.assertEqual(inst.date.date, FHIRDate("2016-01-28").date)
self.assertEqual(inst.date.as_json(), "2016-01-28")
self.assertEqual(inst.description, "This is an example code system that includes all the ACME codes for serum/plasma cholesterol from v2.36.")
self.assertTrue(inst.experimental)
self.assertEqual(inst.filter[0].code, "acme-plasma")
self.assertEqual(inst.filter[0].description, "An internal filter used to select codes that are only used with plasma")
self.assertEqual(inst.filter[0].operator[0], "=")
self.assertEqual(inst.filter[0].value, "the value of this filter is either 'true' or 'false'")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://acme.com/identifiers/codesystems")
self.assertEqual(inst.identifier[0].value, "internal-cholesterol-inl")
self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablecodesystem")
self.assertEqual(inst.name, "ACMECholCodesBlood")
self.assertEqual(inst.publisher, "Acme Co")
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "ACME Codes for Cholesterol in Serum/Plasma")
self.assertEqual(inst.url, "http://hl7.org/fhir/CodeSystem/example")
self.assertEqual(inst.version, "20160128")
| bsd-3-clause | 3347bd8c90a606f4524eccfd3a431b54 | 60.51145 | 211 | 0.68764 | 3.522623 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/observationdefinition_tests.py | 1 | 1758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import observationdefinition
from .fhirdate import FHIRDate
class ObservationDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ObservationDefinition", js["resourceType"])
return observationdefinition.ObservationDefinition(js)
def testObservationDefinition1(self):
inst = self.instantiate_from("observationdefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ObservationDefinition instance")
self.implObservationDefinition1(inst)
js = inst.as_json()
self.assertEqual("ObservationDefinition", js["resourceType"])
inst2 = observationdefinition.ObservationDefinition(js)
self.implObservationDefinition1(inst2)
def implObservationDefinition1(self, inst):
self.assertEqual(inst.code.coding[0].code, "15074-8")
self.assertEqual(inst.code.coding[0].display, "Glucose [Moles/volume] in Blood")
self.assertEqual(inst.code.coding[0].system, "http://loinc.org")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | 5d89230a7f5391f48bace5220a918140 | 39.883721 | 103 | 0.694539 | 3.632231 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/basic_tests.py | 1 | 5714 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import basic
from .fhirdate import FHIRDate
class BasicTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Basic", js["resourceType"])
return basic.Basic(js)
def testBasic1(self):
inst = self.instantiate_from("basic-example-narrative.json")
self.assertIsNotNone(inst, "Must have instantiated a Basic instance")
self.implBasic1(inst)
js = inst.as_json()
self.assertEqual("Basic", js["resourceType"])
inst2 = basic.Basic(js)
self.implBasic1(inst2)
def implBasic1(self, inst):
self.assertEqual(inst.code.text, "Example Narrative Tester")
self.assertEqual(inst.id, "basic-example-narrative")
self.assertEqual(inst.text.status, "additional")
def testBasic2(self):
inst = self.instantiate_from("basic-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Basic instance")
self.implBasic2(inst)
js = inst.as_json()
self.assertEqual("Basic", js["resourceType"])
inst2 = basic.Basic(js)
self.implBasic2(inst2)
def implBasic2(self, inst):
self.assertEqual(inst.code.coding[0].code, "referral")
self.assertEqual(inst.code.coding[0].system, "http://hl7.org/fhir/basic-resource-type")
self.assertEqual(inst.created.date, FHIRDate("2013-05-14").date)
self.assertEqual(inst.created.as_json(), "2013-05-14")
self.assertEqual(inst.extension[0].url, "http://example.org/do-not-use/fhir-extensions/referral#requestingPractitioner")
self.assertEqual(inst.extension[1].url, "http://example.org/do-not-use/fhir-extensions/referral#notes")
self.assertEqual(inst.extension[1].valueString, "The patient had fever peaks over the last couple of days. He is worried about these peaks.")
self.assertEqual(inst.extension[2].url, "http://example.org/do-not-use/fhir-extensions/referral#fulfillingEncounter")
self.assertEqual(inst.id, "referral")
self.assertEqual(inst.modifierExtension[0].url, "http://example.org/do-not-use/fhir-extensions/referral#referredForService")
self.assertEqual(inst.modifierExtension[0].valueCodeableConcept.coding[0].code, "11429006")
self.assertEqual(inst.modifierExtension[0].valueCodeableConcept.coding[0].display, "Consultation")
self.assertEqual(inst.modifierExtension[0].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.modifierExtension[1].url, "http://example.org/do-not-use/fhir-extensions/referral#targetDate")
self.assertEqual(inst.modifierExtension[1].valuePeriod.end.date, FHIRDate("2013-04-15").date)
self.assertEqual(inst.modifierExtension[1].valuePeriod.end.as_json(), "2013-04-15")
self.assertEqual(inst.modifierExtension[1].valuePeriod.start.date, FHIRDate("2013-04-01").date)
self.assertEqual(inst.modifierExtension[1].valuePeriod.start.as_json(), "2013-04-01")
self.assertEqual(inst.modifierExtension[2].url, "http://example.org/do-not-use/fhir-extensions/referral#status")
self.assertEqual(inst.modifierExtension[2].valueCode, "complete")
self.assertEqual(inst.text.status, "generated")
def testBasic3(self):
inst = self.instantiate_from("basic-example2.json")
self.assertIsNotNone(inst, "Must have instantiated a Basic instance")
self.implBasic3(inst)
js = inst.as_json()
self.assertEqual("Basic", js["resourceType"])
inst2 = basic.Basic(js)
self.implBasic3(inst2)
def implBasic3(self, inst):
self.assertEqual(inst.code.coding[0].code, "UMLCLASSMODEL")
self.assertEqual(inst.code.coding[0].system, "http://example.org/do-not-use/fhir-codes#resourceTypes")
self.assertEqual(inst.extension[0].extension[0].url, "name")
self.assertEqual(inst.extension[0].extension[0].valueString, "Class1")
self.assertEqual(inst.extension[0].extension[1].extension[0].url, "name")
self.assertEqual(inst.extension[0].extension[1].extension[0].valueString, "attribute1")
self.assertEqual(inst.extension[0].extension[1].extension[1].url, "minOccurs")
self.assertEqual(inst.extension[0].extension[1].extension[1].valueInteger, 1)
self.assertEqual(inst.extension[0].extension[1].extension[2].url, "maxOccurs")
self.assertEqual(inst.extension[0].extension[1].extension[2].valueCode, "*")
self.assertEqual(inst.extension[0].extension[1].url, "attribute")
self.assertEqual(inst.extension[0].extension[2].extension[0].url, "name")
self.assertEqual(inst.extension[0].extension[2].extension[0].valueString, "attribute2")
self.assertEqual(inst.extension[0].extension[2].extension[1].url, "minOccurs")
self.assertEqual(inst.extension[0].extension[2].extension[1].valueInteger, 0)
self.assertEqual(inst.extension[0].extension[2].extension[2].url, "maxOccurs")
self.assertEqual(inst.extension[0].extension[2].extension[2].valueInteger, 1)
self.assertEqual(inst.extension[0].extension[2].url, "attribute")
self.assertEqual(inst.extension[0].url, "http://example.org/do-not-use/fhir-extensions/UMLclass")
self.assertEqual(inst.id, "classModel")
self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | b8e61cd01c820c5da1f3d876cec99857 | 53.419048 | 149 | 0.693385 | 3.433894 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/50d9eeb498c3_genomic_contamination_records.py | 1 | 2402 | """genomic contamination records
Revision ID: 50d9eeb498c3
Revises: 2d20e3ee280a
Create Date: 2020-12-17 15:25:04.596627
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.genomic_enums import GenomicJob
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '50d9eeb498c3'
down_revision = '2d20e3ee280a'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('genomic_sample_contamination',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('sample_id', sa.String(length=80), nullable=False),
sa.Column('failed_in_job', rdr_service.model.utils.Enum(GenomicJob), nullable=False),
sa.ForeignKeyConstraint(['sample_id'], ['biobank_stored_sample.biobank_stored_sample_id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('genomic_sample_contamination')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| bsd-3-clause | 5440a50620de2af18bdaa7f5e392cbe3 | 33.811594 | 125 | 0.743547 | 3.537555 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/resource/generators/retention_metrics.py | 1 | 1965 | #
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
import logging
from sqlalchemy.sql import text
from werkzeug.exceptions import NotFound
from rdr_service.dao.resource_dao import ResourceDataDao
from rdr_service.resource import generators, schemas
from rdr_service.resource.constants import RetentionStatusEnum, RetentionTypeEnum
class RetentionEligibleMetricGenerator(generators.BaseGenerator):
"""
Generate a Retention Metric resource object
"""
ro_dao = None
def make_resource(self, p_id, backup=False):
"""
Build a resource object from the given primary key id.
:param p_id: Participant ID.
:param backup: if True, get from backup database instead of Primary.
:return: resource object
"""
if not self.ro_dao:
self.ro_dao = ResourceDataDao(backup=backup)
with self.ro_dao.session() as ro_session:
row = ro_session.execute(
text('select * from retention_eligible_metrics where participant_id = :pid'), {'pid': p_id}).first()
data = self.ro_dao.to_dict(row)
if not data:
msg = f'Participant P{p_id} not found in retention_eligible_metrics table.'
logging.error(msg)
raise NotFound(msg)
# Populate Enum fields. Note: When Enums have a possible zero value, explicitly check for None.
if data['retention_eligible_status'] is not None:
data['retention_eligible_status'] = data['retention_eligible_status_id'] = \
RetentionStatusEnum(data['retention_eligible_status'])
if data['retention_type'] is not None:
data['retention_type'] = data['retention_type_id'] = RetentionTypeEnum(data['retention_type'])
return generators.ResourceRecordSet(schemas.RetentionMetricSchema, data)
| bsd-3-clause | 3d0367265dce8e4a5313ec7a1e0f9def | 39.9375 | 120 | 0.654962 | 4.189765 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/tools/tool_libs/consent_validation_report.py | 1 | 61856 | #! /bin/env python
#
# Temporary tool for manually generating consent validation metrics (until it can be automated by dashboard team)
# Also creates CSV files for PTSC with information about consent errors.
#
import argparse
# pylint: disable=superfluous-parens
# pylint: disable=broad-except
import logging
import sys
import os
import csv
import gspread
import gspread_formatting as gsfmt
import pandas
from datetime import date, datetime, timedelta
from gspread.utils import rowcol_to_a1
from rdr_service.services.system_utils import setup_logging, setup_i18n
from rdr_service.services.gcp_config import RdrEnvironment
from rdr_service.tools.tool_libs import GCPProcessContext, GCPEnvConfigObject
from rdr_service.services.gcp_utils import gcp_get_iam_service_key_info
from rdr_service.model.consent_file import ConsentSyncStatus, ConsentType, ConsentOtherErrors
_logger = logging.getLogger("rdr_logger")
# Tool_cmd and tool_desc name are required.
# Remember to add/update bash completion in 'tool_lib/tools.bash'
tool_cmd = "consent-report"
tool_desc = "Publish consent validation metrics to a google sheets doc and/or create a CSV with consent error details"

# This list matches the names of the column names / calculated fields returned from the custom SQL query that pulls
# data from the RDR consent_file table.  Each entry is an error-flag column whose value is 0 or 1 per consent record.
TRACKED_CONSENT_ERRORS = [
    'missing_file',
    'signature_missing',
    'invalid_signing_date',
    'invalid_dob',
    'invalid_age_at_consent',
    'checkbox_unchecked',
    'non_va_consent_for_va',
    'va_consent_for_non_va'
]

# The PTSC CSV file has some identifying information columns about consents with errors, plus the error status columns
# (column order must stay aligned with the fields returned by the report SQL).
PTSC_CSV_COLUMN_HEADERS = ['participant_id', 'type', 'file_path', 'file_upload_time'] + TRACKED_CONSENT_ERRORS

# Tuples with column header text and column number (1-based) for generating consent error count sections
# of the googlesheets report.  The max column number here also defines the report sheet width.
CONSENT_ERROR_COUNT_COLUMNS = [
    ('Consent Type', 3),
    ('Expected', 4),
    ('Ready to Sync', 5),
    ('Participants With Unresolved Issues', 6),
    ('Consent Files With Errors', 7),
    ('Total Errors', 8),
    ('Missing File', 9),
    ('Signature Missing', 10),
    ('Signature Date Invalid', 11),
    ('Invalid DOB', 12),
    ('Age at Primary Consent < 18', 13),
    ('Checkbox Unchecked', 14),
    ('Non-VA Consent for VA Participant', 15),
    ('VA Consent for Non-VA Participant', 16)
]

# Maps the currently validated consent types to the related status/authored fields to query from participant_summary.
# Tuple layout: (<status field name>, <authored timestamp field name>)
CONSENT_PARTICIPANT_SUMMARY_FIELDS = {
    # For PRIMARY: use earliest consent authored (to distinguish from PrimaryConsentUpdate authored, which are not
    # yet included in the validation)
    ConsentType.PRIMARY : ('consent_for_study_enrollment', 'consent_for_study_enrollment_first_yes_authored'),
    ConsentType.CABOR: ('consent_for_cabor', 'consent_for_cabor_authored'),
    ConsentType.EHR: ('consent_for_electronic_health_records',
                      'consent_for_electronic_health_records_first_yes_authored'),
    ConsentType.GROR: ('consent_for_genomics_ror', 'consent_for_genomics_ror_authored'),
    ConsentType.PRIMARY_UPDATE: ('consent_for_study_enrollment', 'consent_for_study_enrollment_authored')
}
# List of currently validated consent type values as ints, for pandas filtering of consent_file.type values
CONSENTS_LIST = [int(v) for v in CONSENT_PARTICIPANT_SUMMARY_FIELDS.keys()]
# Raw SQL used initially for fast prototyping of reports. These reports will be taken over by dashboard team
CONSENT_REPORT_SQL_BODY = """
SELECT cf.participant_id,
ps.date_of_birth,
ps.participant_origin,
CASE
WHEN (h.name IS NOT NULL and h.name != 'UNSET') THEN h.name
ELSE '(Unpaired)'
END AS hpo,
CASE
WHEN o.display_name IS NOT NULL THEN o.display_name ELSE '(No organization pairing)'
END AS organization,
DATE(ps.{authored_field}) AS consent_authored_date,
cf.sync_status,
cf.type,
cf.file_path,
cf.file_upload_time,
-- Adding signing date details to data pull to support new filtering logic on the results
cf.signing_date,
cf.expected_sign_date,
CASE WHEN cf.sync_status = 3 then DATE(cf.modified) ELSE NULL END AS resolved_date,
-- Calculated fields to generate 0 or 1 values for the known tracked error conditions
-- (1 if error found)
NOT cf.file_exists AS missing_file,
(cf.file_exists and NOT is_signature_valid) AS signature_missing,
(cf.is_signature_valid and NOT cf.is_signing_date_valid) AS invalid_signing_date,
-- Invalid DOB conditions: DOB missing, DOB before defined cutoff, DOB in the future, or
-- DOB later than the consent authored date
(ps.date_of_birth is null or ps.date_of_birth > "{report_date}"
or (ps.consent_for_study_enrollment_first_yes_authored is not null
and TIMESTAMPDIFF(YEAR, ps.consent_for_study_enrollment_first_yes_authored,
ps.date_of_birth) > 124))
AS invalid_dob,
TIMESTAMPDIFF(YEAR, COALESCE(ps.date_of_birth, CURRENT_DATE),
ps.consent_for_study_enrollment_first_yes_authored) < 18
AS invalid_age_at_consent,
-- Map the text for other errors we know about to its TRACKED_CONSENT_ERRORS name
(cf.file_exists AND cf.other_errors LIKE "%{missing_check_mark}%") AS checkbox_unchecked,
(cf.file_exists AND cf.other_errors LIKE "%{non_va_for_va}")
AS non_va_consent_for_va,
(cf.file_exists AND cf.other_errors LIKE "%{va_for_non_va}%")
AS va_consent_for_non_va
FROM consent_file cf
JOIN participant_summary ps on cf.participant_id = ps.participant_id
JOIN participant p on p.participant_id = ps.participant_id
AND p.is_test_participant = 0 and (p.is_ghost_id is null or not p.is_ghost_id) and p.hpo_id != 21
LEFT OUTER JOIN hpo h ON p.hpo_id = h.hpo_id
LEFT OUTER JOIN organization o on ps.organization_id = o.organization_id
"""
# Daily report filter for validation results on all newly received and validated consents:
# - For each consent type, filter on participants whose consent authored date for that consent matches the report date
# and where the consent status in participant_summary is SUBMITTED (1) -- the validation process is only interested
# in newly authored "yes"/SUBMITTED consents
# - Find corresponding consent_file entries for the consent type, in NEEDS_CORRECTING/READY_TO_SYNC/SYNC_COMPLETE
DAILY_CONSENTS_SQL_FILTER = """
WHERE cf.type = {consent_type}
AND ps.{status_field} = 1
AND DATE(ps.{authored_field}) = "{report_date}"
AND cf.sync_status IN (1,2,4)
"""
# -- Weekly report queries --
# Filter to produce a report of all remaining NEEDS_CORRECTING consents of a specified type, up to and including the
# specified end date for this report
ALL_UNRESOLVED_ERRORS_SQL_FILTER = """
WHERE cf.type = {consent_type}
AND DATE(cf.created) <= "{validation_end_date}"
AND cf.sync_status = 1
"""
# Filter for generating stats on file issues resolved by retransmission (OBSOLETE consent_file entries)
# The last modified timestamp for an OBSOLETE record should reflect when it was moved into OBSOLETE status; include
# resolutions up to the specified end date for this report.
ALL_RESOLVED_SQL = """
SELECT cf.participant_id,
cf.id,
ps.participant_origin,
cf.type,
DATE(cf.modified) AS resolved_date
FROM consent_file cf
-- Alias to ps to be consistent with other queries that use common filters
JOIN participant ps on ps.participant_id = cf.participant_id
WHERE cf.sync_status = 3
AND DATE(cf.modified) <= "{validation_end_date}"
AND ps.is_test_participant = 0 and (ps.is_ghost_id is null or ps.is_ghost_id = 0) and ps.hpo_id != 21
"""
ORIGIN_SQL_FILTER = ' AND ps.participant_origin = "{origin_filter}"'
# Weekly report SQL for validation burndown counts
CONSENTED_PARTICIPANTS_COUNT_SQL = """
SELECT COUNT(DISTINCT ps.participant_id) consented_participants
FROM participant_summary ps
JOIN participant p ON p.participant_id = ps.participant_id
AND p.is_test_participant = 0 and (p.is_ghost_id is null or not p.is_ghost_id) and p.hpo_id != 21
WHERE DATE(ps.consent_for_study_enrollment_first_yes_authored) <= "{end_date}"
"""
VALIDATED_PARTICIPANTS_COUNT_SQL = """
SELECT COUNT(DISTINCT cf.participant_id) validated_participants
FROM consent_file cf
JOIN participant ps ON ps.participant_id = cf.participant_id
AND ps.is_test_participant = 0 and (ps.is_ghost_id is null or not ps.is_ghost_id) and ps.hpo_id != 21
WHERE DATE(cf.created) <= "{end_date}"
"""
# Define the allowable --report-type arguments
REPORT_TYPES = ['daily_uploads', 'weekly_status']
class SafeDict(dict):
    """
    A dict that leaves unknown str.format_map() placeholders intact rather than raising KeyError.

    See: https://stackoverflow.com/questions/17215400/format-string-unused-named-arguments
    E.g.: '{lastname}, {firstname} {lastname}'.format_map(SafeDict(lastname='Bond'))
    yields the partially formatted result: 'Bond, {firstname} Bond'.
    Used when formatting SQL template strings that need some of their placeholders filled in
    before the values for the others can be determined.
    """

    def __missing__(self, key):
        # Echo the placeholder back, re-wrapped in braces, so a later format pass can fill it in
        return '{{{}}}'.format(key)
class ConsentReport(object):
""""
The ConsentReport class will contain attributes and methods common to both the daily consent validation report
and the weekly consent validation status report.
"""
def __init__(self, args, gcp_env: GCPEnvConfigObject):
"""
:param args: command line arguments.
:param gcp_env: gcp environment information, see: gcp_initialize().
"""
self.args = args
self.gcp_env = gcp_env
self.db_conn = None
if not args.report_type in REPORT_TYPES:
raise ValueError(f'invalid report type option: {args.report_type}')
else:
self.report_type = args.report_type
# Defaults, overridden as needed by child classes
self.worksheet = None
self.sheet_rows = 500
# The column indexes/numbers are the second element in the CONSENT_ERROR_COUNT_COLUMN tuples. Get the column
# count for the sheet by finding the max column number
self.sheet_cols = max([column_tuple[1] for column_tuple in CONSENT_ERROR_COUNT_COLUMNS])
# A pandas dataframe to be populated with results of the specific report (daily or weekly) SQL query
self.consent_df = None
# A participant origin filter value applied to queries
self.origin_value = None
# Position tracker updated as content is added to the report worksheet
self.row_pos = 1
# Lists appended to as the report content is generated, containing the cell data and the cell formatting
# The resulting data will be written out in a gspread batch_update() call, and the formatting will be applied
# via a gspread-formatting format_cell_ranges() batch call
self.report_data = list()
self.report_formatting = list()
# Commonly used cell formats for the consent reports, applied via gspread-formatting module (imported as gsfmt)
# See https://libraries.io/pypi/gspread-formatting for information on its implementation of
# googlesheets v4 API CellFormat classes and how to nest its classes
self.format_specs = {
'bold_text': gsfmt.cellFormat(textFormat=gsfmt.textFormat(bold=True, fontSize=12)),
'bold_small_wrapped': gsfmt.cellFormat(textFormat=gsfmt.textFormat(bold=True, fontSize=9),
wrapStrategy='WRAP'),
'italic_text': gsfmt.cellFormat(textFormat=gsfmt.textFormat(italic=True, fontSize=12)),
'legend_text': gsfmt.cellFormat(textFormat=gsfmt.textFormat(fontSize=10,italic=True,
foregroundColor=gsfmt.color(0, 0, 1))),
'column_header': gsfmt.cellFormat(textFormat=gsfmt.textFormat(bold=True),
wrapStrategy='WRAP',
verticalAlignment='MIDDLE'),
'count_section_header_row': gsfmt.cellFormat(textFormat=gsfmt.textFormat(bold=True),
wrapStrategy='WRAP',
backgroundColor=gsfmt.color(0.02, 0.8, 0.4),
verticalAlignment='MIDDLE',
borders=gsfmt.borders(top=gsfmt.border('SOLID_MEDIUM'),
bottom=gsfmt.border('SOLID_MEDIUM'))),
'solid_border': gsfmt.cellFormat(borders=gsfmt.borders(top=gsfmt.border('SOLID'))),
'solid_thick_border': gsfmt.cellFormat(borders=gsfmt.borders(bottom=gsfmt.border('SOLID_THICK')))
}
def _set_origin_value(self, origin):
""" Save an origin value to use on queries """
self.origin_value = origin
def _clear_report(self):
""" Clear the report_data """
self.report_data.clear()
self.report_formatting.clear()
self.row_pos = 1
def _add_format_spec(self, fmt_name_key: str, fmt_spec : gsfmt.CellFormat):
""" Add a new format spec to the instance format_specs list """
if not len(fmt_name_key) or not isinstance(fmt_spec, gsfmt.CellFormat):
raise (ValueError, "Invalid format specification data")
else:
self.format_specs[fmt_name_key] = fmt_spec
def _connect_to_rdr_replica(self):
""" Establish a connection to the replica RDR database for reading consent validation data """
self.gcp_env.activate_sql_proxy(replica=True)
self.db_conn = self.gcp_env.make_mysqldb_connection()
def _has_needs_correcting(self, dframe):
""" Check if the dataframe provided has any records in a NEEDS_CORRECTING state """
return (dframe.loc[dframe.sync_status == int(ConsentSyncStatus.NEEDS_CORRECTING)].shape[0] > 0)
def _make_a1_notation(self, start_row, start_col=1, end_row=None, end_col=None):
"""
Use the rowcol_to_a1() gspread method to construct an A1 cell range notation string
A starting row position is required. A starting col of 1 is the presumed default. If no ending row/col is
provided, then assume the ending position is the same row and/or column
Returns: a string such as 'A1:A1' (single cell), 'A5:N5' (multiple columns on the same row), etc.
"""
# Assume single row / single col if no ending coordinate is provided
end_row = end_row or start_row
end_col = end_col or start_col
# Sanity check on row and column values vs. defined spreadsheet dimensions
if start_row > self.sheet_rows or end_row > self.sheet_rows:
raise ValueError(f'Row value exceeds maximum of {self.sheet_rows}')
if start_col > self.sheet_cols or end_col > self.sheet_cols:
raise ValueError(f'Column value exceeds maximum of {self.sheet_cols}')
return ''.join([rowcol_to_a1(start_row, start_col), ':', rowcol_to_a1(end_row, end_col)])
@staticmethod
def format_number(number):
""" Return a number value formatted with commas """
return f'{number:8,}'
def _add_report_rows(self, cell_range, value_list=[]):
"""
Adds to the list of report_data elements that will be passed to gspread batch_update().
Example of a data element dict:
{ 'range': 'A1:N5',
'values': [[<cell values for row 1 A1:N1 columns], ..., [<cell values for row 5 A5:N5 columns]]
}
"""
if not cell_range or not len(value_list):
raise(ValueError, "Invalid data object for spreadsheet")
self.report_data.append({
'range': cell_range,
'values': value_list
})
def _add_report_formatting(self, cell_range: str, fmt_spec: gsfmt.CellFormat):
"""
Adds an element to a list of formatting spec elements that will be passed to gspread-formatting
format_cell_ranges()
See: https://libraries.io/pypi/gspread-formatting
"""
self.report_formatting.append((cell_range, fmt_spec))
def _add_text_rows(self, text_rows=[], format_spec=None, row_pos=None):
"""
Add a row or rows with the requested text (e.g., Report Date line, Notes, etc.) to the report content
"""
if not row_pos:
row_pos = self.row_pos
end_of_text_pos = row_pos + len(text_rows)
cell_range = self._make_a1_notation(row_pos, end_row=end_of_text_pos)
self._add_report_rows(cell_range, text_rows)
if format_spec:
self._add_report_formatting(cell_range, format_spec)
self.row_pos = end_of_text_pos
def _add_consent_issue_count_header_section(self, row_pos=None, hpo=''):
"""
Builds a counts section shaded header row with the all the column headers. This section header is used
for both the aggregate (all entities) counts, as well as for each section when the error counts are broken down
by HPO/Org
"""
if not row_pos:
row_pos = self.row_pos
# The column header string is the first element of each tuple in the CONSENT_ERROR_COUNT_COLUMNS tuple list
count_headers = [column_tuple[0] for column_tuple in CONSENT_ERROR_COUNT_COLUMNS]
# Kludge: minor customization of otherwise shared data between daily and weekly reports.
if self.report_type == 'weekly_status':
# Drop the Expected and Ready to Sync columns; aren't helpful to show in weekly report, which is tracking
# consents with errors only
count_headers = [h for h in count_headers if h not in ['Expected', 'Ready to Sync']]
# Column A has HPO name, Column B intentionally blank, then add the rest of the error count columns
hpo_header_row = [hpo, ''] + count_headers
cell_range = self._make_a1_notation(row_pos, end_col=self.sheet_cols)
# Add this single header row and its formatting to the report content
self._add_report_rows(cell_range, [hpo_header_row])
self._add_report_formatting(cell_range, self.format_specs.get('count_section_header_row'))
self.row_pos = row_pos + 1
    def _add_consent_issue_counts(self, df, row_pos=None, org=None, show_all_counts=False):
        """
        Builds and populates a subsection of rows, with one row per consent type, indicating its status/error counts

        :param df: The dataframe to operate on. This could be data for all entities to generate overall counts, or
        it could be a dataframe filtered by organization for the organization-specific counts
        :param row_pos: Row to start writing at; defaults to the current self.row_pos cursor
        :param org: Organization name shown (bolded) in column A of the first generated row, if provided
        :param show_all_counts: Set to True by caller if lines for consents with 0 error counts should be shown
        """
        if not row_pos:
            row_pos = self.row_pos
        # Track if we've already generated a row containing the organization name. It's only included with the first
        # line / first consent that has associated errors
        org_string_written = False
        for consent in CONSENTS_LIST:
            # Total validation records for this consent type in the provided dataframe
            expected_count = df.loc[df.type == consent].shape[0]
            # Won't generate report rows for consents that had no entries in the validation results
            if not expected_count:
                continue
            # Everything not in NEEDS_CORRECTING counts as ready to sync
            ready_count = df.loc[(df.type == consent)\
                                 & (df.sync_status != int(ConsentSyncStatus.NEEDS_CORRECTING))].shape[0]
            # Create a filtered dataframe of records for this consent in NEEDS_CORRECTING status, for further analysis
            consents_with_errors = df.loc[(df.type == consent)\
                                          & (df.sync_status == int(ConsentSyncStatus.NEEDS_CORRECTING))].reset_index()
            consent_error_count = consents_with_errors.shape[0]
            # Count of distinct (pandas nunique() = number of unique) participant_id values having NEEDS_CORRECTING:
            participant_count = consents_with_errors['participant_id'].nunique()
            if not consent_error_count and not show_all_counts:
                # No errors/nothing to report for this consent type
                continue
            # The organization name (bolded) only appears in Column A for the first row generated.
            # Column B is intentionally blank
            if org and not org_string_written:
                row_values = [org, '']
                self._add_report_formatting(self._make_a1_notation(row_pos),
                                            self.format_specs.get('bold_small_wrapped'))
                org_string_written = True
            else:
                row_values = ['', '']
            # Kludge: Some minor customization of otherwise mostly shared data between daily and weekly reports
            if self.report_type == 'weekly_status':
                # Weekly outstanding issues report does not have Expected / Ready to Sync columns
                row_values.extend([str(ConsentType(consent)),
                                   self.format_number(int(participant_count)),
                                   self.format_number(consent_error_count)])
            else:
                row_values.extend([ str(ConsentType(consent)),
                                    self.format_number(expected_count),
                                    self.format_number(ready_count),
                                    self.format_number(int(participant_count)),
                                    self.format_number(consent_error_count)])
            # Per-error-type column values (None = leave the cell blank), plus a running total
            tracked_error_values = []
            total_errors = 0
            if consent_error_count:
                for error in TRACKED_CONSENT_ERRORS:
                    if error in ['invalid_dob', 'invalid_age_at_consent'] and consent != int(ConsentType.PRIMARY):
                        # DOB issues only apply for PRIMARY consent
                        error_count = 0
                    else:
                        # Pandas: sum all the values in error type column (will be 0 or 1). Cast result from float
                        error_count = int(consents_with_errors[error].sum())
                    if error_count:
                        tracked_error_values.append(self.format_number(error_count))
                        total_errors += error_count
                    else:
                        # Suppress writing 0s to the spreadsheet individual error columns, for better readability.
                        # Only columns with an error count to report will have values in them.
                        tracked_error_values.append(None)
            # Total Errors column precedes the individual per-error-type columns
            row_values.append(self.format_number(total_errors))
            row_values.extend(tracked_error_values)
            self._add_report_rows(self._make_a1_notation(row_pos, end_col=len(row_values)), [row_values])
            row_pos += 1
        # Persist the advanced cursor for subsequent report sections
        self.row_pos = row_pos
def _add_errors_by_org(self, df=None):
""""
Generate HPO/Organization-specific breakdowns of the consent error metrics.
Only organizations for which there were associated errors will be included in the report output.
"""
if df is None:
df = self.consent_df
# Iterate over list of distinct (pandas: unique() ) HPO names in the dataframe
hpos = df['hpo'].unique()
for hpo in sorted(hpos):
hpo_df = df[df.hpo == hpo] # Yields an HPO-filtered dataframe
if self._has_needs_correcting(hpo_df):
self._add_consent_issue_count_header_section(hpo=hpo, row_pos=self.row_pos + 1)
# Iterate over distinct organizations in the HPO dataframe and build error report for any org having
# records in NEEDS_CORRECTING status
orgs = hpo_df['organization'].unique()
for org in sorted(orgs):
org_df = hpo_df[hpo_df.organization == org] # Yields an Org-filtered dataframe from the HPO frame
if self._has_needs_correcting(org_df):
# Visual border to separate from the previous organization subsection
self._add_report_formatting(self._make_a1_notation(self.row_pos, end_col=self.sheet_cols),
self.format_specs.get('solid_border'))
self._add_consent_issue_counts(org_df, org=org, row_pos=self.row_pos)
# Draw final border for the entire HPO section after all the organization subsections are generated
self._add_report_formatting(self._make_a1_notation(self.row_pos - 1, end_col=self.sheet_cols),
self.format_specs.get('solid_thick_border'))
def _remove_potential_false_positives_for_consent_version(self, df):
"""
Found some cases where the validation on the consent files may have run before the participant pairing was
completed. This was resulting in some potential false positives for va_consent_for_non_va errors. For now,
ignore any NEEDS_CORRECTING records where participant is currently paired to VA HPO, and the only error flagged
was va_consent_for_non_va
"""
# Pandas: find all the records we want to keep and make a new dataframe out of the result. Inverts the
# "and" conditions above for the known false positives in order to find everything but those records
filtered_df = df.loc[(df.sync_status != int(ConsentSyncStatus.NEEDS_CORRECTING)) |\
(df.hpo != 'VA') | (df.va_consent_for_non_va == 0) |\
(df.missing_file == 1) | (df.invalid_dob == 1) | (df.invalid_age_at_consent == 1) |\
(df.checkbox_unchecked == 1) | (df.non_va_consent_for_va == 1)]
print(f'Filtered count for consent version false positives: {df.shape[0] - filtered_df.shape[0]}')
return filtered_df
def _get_consent_validation_dataframe(self, sql_template):
"""
Queries the RDR participant summary/consent_file tables for entries of each consent type for which validation
has been implemented, and merges the results into a single pandas dataframe
:param sql_template A SQL string with {authored_field}, {status_field}, and {consent_type} placeholders
that will be filled in as data for each consent type is queried
"""
if not self.db_conn:
raise (EnvironmentError, 'No active DB connection object')
df = pandas.DataFrame()
for consent_int in CONSENTS_LIST:
sql = sql_template
# The tuple retrieved from the CONSENT_PARTICIPANT_SUMMARY_FIELDS dict has two elements like:
# ('consent_for_study_enrollment', 'consent_for_study_enrollment_first_yes_authored')
consent_status_field = CONSENT_PARTICIPANT_SUMMARY_FIELDS[ConsentType(consent_int)][0]
consent_authored_field = CONSENT_PARTICIPANT_SUMMARY_FIELDS[ConsentType(consent_int)][1]
sql = sql.format_map(SafeDict(consent_type=consent_int,
status_field=consent_status_field,
authored_field=consent_authored_field,
missing_check_mark=ConsentOtherErrors.MISSING_CONSENT_CHECK_MARK,
non_va_for_va=ConsentOtherErrors.NON_VETERAN_CONSENT_FOR_VETERAN,
va_for_non_va=ConsentOtherErrors.VETERAN_CONSENT_FOR_NON_VETERAN
))
consent_df = pandas.read_sql_query(sql, self.db_conn)
# Replace any null values in the calculated error flag columns with (uint8 vs. pandas default float) zeroes
for error_type in TRACKED_CONSENT_ERRORS:
consent_df = consent_df.fillna({error_type: 0}).astype({error_type: 'uint8'})
df = df.append(consent_df)
# Temporary? Attempt to filter false positives for va_consent_for_non_va consent version errors out of the
# generated dataframe
df = self._remove_potential_false_positives_for_consent_version(df)
return df
def _write_report_content(self):
""" Make the batch calls to add all the cell data and apply the formatting to the spreadsheet """
self.worksheet.batch_update(self.report_data)
gsfmt.format_cell_ranges(self.worksheet, self.report_formatting)
class DailyConsentReport(ConsentReport):
    """
    Class to implement the generation of the daily consent validation report for newly authored consents, and a
    CSV file with error details if errors were detected
    """
    def __init__(self, args, gcp_env: GCPEnvConfigObject):
        """
        :param args: Parsed command line arguments (doc_id, report_date, csv_only, sheet_only, etc.)
        :param gcp_env: Activated GCP environment configuration object
        :raises ValueError: if the resolved google doc id is empty
        """
        super().__init__(args, gcp_env)
        if args.doc_id:
            self.doc_id = args.doc_id
        else:
            # NOTE(review): environ[...] raises KeyError when DAILY_CONSENT_DOC_ID is unset, so the
            # ValueError guard below only fires when the variable exists but is empty — confirm intended
            self.doc_id = os.environ['DAILY_CONSENT_DOC_ID']
        if not self.doc_id:
            raise ValueError('Please use the --doc-id arg or export DAILY_CONSENT_DOC_ID environment var')
        if args.report_date:
            self.report_date = args.report_date
        else:
            # Default to yesterday's date as the filter for consent authored date
            self.report_date = datetime.now() - timedelta(days=1)
        self.report_sql = CONSENT_REPORT_SQL_BODY + DAILY_CONSENTS_SQL_FILTER + ORIGIN_SQL_FILTER
        # Max columns for the daily sheet (max column index value from the CONSENT_ERROR_COUNT_COLUMNS tuples)
        self.sheet_cols = max([column[1] for column in CONSENT_ERROR_COUNT_COLUMNS])
        # Number of days/worksheets to archive in the file (will do rolling deletion of oldest daily worksheets/tabs)
        self.max_daily_reports = 63 # A month's worth for both PTSC and CE + an extra sheet for Notes/legend
        self.consent_errors_found = False
    def create_csv_errors_file(self):
        """
        Generate a CSV file with the consent error details (originally just for PTSC).
        TODO: May need to simultaneously create two CSV files once we add validations for CE? Unless API is done?
        """
        # Restrict to records still flagged as needing correction
        errors_df = self.consent_df[self.consent_df.sync_status == int(ConsentSyncStatus.NEEDS_CORRECTING)]
        # Initialize the list of lists which will be passed to CSV writer writerows(), with the first row of headers
        output_rows = [PTSC_CSV_COLUMN_HEADERS]
        # iterrows() allows us to iterate through the dataframe similar to a result set of records. It also returns
        # an index, which is unused here and replaced with _ to keep pylint happy
        for _, df_row in errors_df.iterrows():
            # If file_path was null/file was missing, coerce file details to empty strings
            if not df_row['file_path']:
                file_path = file_upload_time = ''
            else:
                file_path = df_row['file_path']
                file_upload_time = df_row['file_upload_time']
            # Generating values at the start of each CSV line such as:
            # P111111111,GROR,ptc-uploads-all-of-us-prod/Participant/P11111111/GROR__000.pdf,2021-08-10 01:38:21,...
            csv_values = [
                'P' + str(df_row['participant_id']),
                str(ConsentType(df_row['type'])),
                file_path,
                file_upload_time
            ]
            # Add the 0/1 values for each of the row's error flag fields
            for error_type in TRACKED_CONSENT_ERRORS:
                csv_values.append(df_row[error_type])
            output_rows.append(csv_values)
        # Write out the csv file to the local directory
        output_file = f'{self.origin_value}_{self.report_date.strftime("%Y%m%d")}_consent_errors.csv'
        _logger.info(f'Writing errors to {output_file}...')
        with open(output_file, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(output_rows)
    def add_daily_summary(self):
        """ Add content that appears on every daily consent validation report regardless of errors """
        auth_date = self.report_date.strftime("%b %-d, %Y")
        now = datetime.now().strftime("%x %X")
        report_title = ' '.join([
            f'Report for {self.origin_value} participant consents authored on: {auth_date} 12:00AM-11:59PM UTC',
            f'(generated on {now} Central)'
        ])
        report_notes = [
            ['Notes:'],
            [f'Validation details on this sheet for {self.origin_value} participants only'],
            ['Checkbox validation currently only performed on GROR consents'],
            ['Total Errors can exceed Consents with Errors if any consents had multiple validation errors']
        ]
        self._add_text_rows(text_rows=[[report_title]], format_spec=self.format_specs.get('bold_text'))
        # Add any explanatory text / details about the report that have been included in the layout
        self._add_text_rows(text_rows=report_notes, format_spec=self.format_specs.get('legend_text'),
                            row_pos=self.row_pos + 1)
        if not self._has_needs_correcting(self.consent_df):
            self._add_text_rows(text_rows=[['No consent validation errors detected']],
                                format_spec=self.format_specs.get('italic_text'), row_pos=self.row_pos+1)
        # Daily summary counts for all the recently authored consents that were processed (regardless of errors)
        self._add_text_rows([['Total Consent Validation Counts']],
                            format_spec=self.format_specs.get('bold_text'), row_pos=self.row_pos+1)
        self._add_consent_issue_count_header_section(hpo='All Entities')
        self._add_consent_issue_counts(self.consent_df, show_all_counts=True)
    def create_daily_report(self, spreadsheet):
        """
        Add a new daily report tab/sheet to the google sheet file, with the validation details for that date
        """
        if not self.args.csv_only:
            existing_sheets = spreadsheet.worksheets()
            # Perform rolling deletion of the oldest reports so we keep a pre-defined maximum number of daily reports
            # NOTE: this assumes all the reports in the file were generated in order, with the most recent date at the
            # leftmost tab (index 0). This deletes sheets from the existing_sheets list, starting at the rightmost tab
            for ws_index in range(len(existing_sheets), self.max_daily_reports-1, -1):
                spreadsheet.del_worksheet(existing_sheets[ws_index-1])
            # Add the new worksheet at tab index 1
            # NOTE(review): comment previously said "index 0" but the code passes index=1; index 0 is
            # presumably the extra Notes/legend tab mentioned in __init__ — confirm which is intended
            self.worksheet = spreadsheet.add_worksheet(f'{self.origin_value} {self.report_date.strftime("%b %d %Y")}',
                                                       rows=self.sheet_rows,
                                                       cols=self.sheet_cols,
                                                       index=1)
        self.add_daily_summary()
        if self._has_needs_correcting(self.consent_df):
            if not self.args.csv_only:
                # Google sheets doesn't have flexible/multiple freezing options. Freeze all rows above the current
                # position. Makes so HPO/Org-specific section(s) scrollable while still seeing column header names
                self.worksheet.freeze(rows=self.row_pos - 1)
            self._add_text_rows(text_rows=[['Consent errors by HPO/Organization']],
                                format_spec=self.format_specs.get('bold_text'))
            self._add_errors_by_org()
            if not self.args.sheet_only:
                self.create_csv_errors_file()
        else:
            _logger.info('No errors to report')
        self._write_report_content()
    def execute(self):
        """
        Execute the DailyConsentReport builder
        """
        # Set up DB and googlesheets doc access
        self._connect_to_rdr_replica()
        service_key_info = gcp_get_iam_service_key_info(self.gcp_env.service_key_id)
        gs_creds = gspread.service_account(service_key_info['key_path'])
        gs_file = gs_creds.open_by_key(self.doc_id)
        # These strings converted to all lowercase when used as SQL query filters
        for origin in ['Vibrent', 'CareEvolution']:
            self._set_origin_value(origin)
            # Retrieve the daily data and build the report. Partial string substitution for the SQL statments is done
            # here; the remaining substitutions occur in the _get_consent_validation_dataframe() method
            self.consent_df = self._get_consent_validation_dataframe(
                self.report_sql.format_map(SafeDict(report_date=self.report_date.strftime("%Y-%m-%d"),
                                                    origin_filter=self.origin_value.lower()))
            )
            self.create_daily_report(gs_file)
            _logger.info(f'{self.origin_value} Daily report complete')
            self._clear_report()
class WeeklyConsentReport(ConsentReport):
"""
Class to implement the weekly consent validation status report, which includes details of all retrospective
validation errors that are still pending resolution
"""
def __init__(self, args, gcp_env: GCPEnvConfigObject):
super().__init__(args, gcp_env)
if args.doc_id:
self.doc_id = args.doc_id
else:
self.doc_id = os.environ['WEEKLY_CONSENT_DOC_ID']
if not self.doc_id:
raise ValueError('Please use the --doc-id arg or export WEEKLY_CONSENT_DOC_ID environment var')
# Default to yesterday's date as the end of the weekly report range, and a week prior to that as start date
self.end_date = args.end_date or (datetime.now() - timedelta(days=1))
self.start_date = args.start_date or (self.end_date - timedelta(days=7))
# When looking for unresolved records for the weekly report, the consent_file.created date could be a day
# later than the end_date range for the authored dates.
self.validation_end_date = self.end_date + timedelta(days=1)
self.report_date = datetime.now()
self.report_sql = CONSENT_REPORT_SQL_BODY + ALL_UNRESOLVED_ERRORS_SQL_FILTER + ORIGIN_SQL_FILTER
self.sheet_rows = 800
# Number of worksheets to archive in the file (will do rolling deletion of oldest weekly worksheets/tabs)
self.max_weekly_reports = 17 # Two month's worth for both PTSC and CE + an extra sheet for Notes/Legend details
self.consent_errors_found = False
# Additional dataframe (after self.consent_df) that will hold results from querying resolved/OBSOLETE issues
self.resolved_df = None
# Format specs only used in weekly report
self._add_format_spec('burndown_header_row',
gsfmt.cellFormat(backgroundColor=gsfmt.color(0.87, 0.46, 0),
textFormat=gsfmt.textFormat(bold=True,fontSize=10))
)
self._add_format_spec('burndown_column_headers',
gsfmt.cellFormat(textFormat=gsfmt.textFormat(bold=True, fontSize=9),
backgroundColor=gsfmt.color(1, .84, 0),
wrapStrategy='WRAP',
verticalAlignment='MIDDLE')
)
def remove_potential_false_positives_for_missing_signature(self, df):
"""
A temporary method to ignore NEEDS_CORRECTING consents if they fit a profile observed during retrospective
validation, where we know the PDF validation tool is failing to find valid signing date/signature details.
NEEDS_CORRECTING records should be ignored for now if:
- missing_file field is 0 (file exists) AND
- expected_sign_date < 2018-07-13 AND
- signing_date is null AND
- Has no other tracked error fields set to 1/True (except either signature_missing or invalid_signing_date)
Returns a dataframe with all the records except those that match the above criteria
"""
filter_date = date(year=2018,month=7,day=13)
# Pandas: find all the records we want to keep and make a new dataframe out of the result. Inverts the
# "and" conditions above for the known false positives in order to find everything but those records
filtered_df = df.loc[(df.missing_file == 1) | (df.invalid_dob == 1) | (df.invalid_age_at_consent == 1) |\
(df.checkbox_unchecked == 1) | (df.non_va_consent_for_va == 1) |\
(df.expected_sign_date >= filter_date) | (df.signing_date.isnull() == False)].reset_index()
print(f'Filtered count for signature_missing false positives: {df.shape[0] - filtered_df.shape[0]}')
return filtered_df
def get_resolved_consent_issues_dataframe(self):
"""
Returns a dataframe of all issues marked OBSOLETE up to and including on the report end date. OBSOLETE implies
the file which did not pass validation has been superseded by a new/retransmitted consent file which was
successfully validated. In some cases, a consent_file entry may be marked OBSOLETE after a manual inspection/
issue resolution.
"""
sql = ALL_RESOLVED_SQL + ORIGIN_SQL_FILTER
sql = sql.format_map(SafeDict(validation_end_date=self.validation_end_date.strftime("%Y-%m-%d"),
origin_filter=self.origin_value.lower()))
resolved_df = pandas.read_sql_query(sql, self.db_conn)
# Make sure we only pick up resolved counts for the consent types currently enabled in the CONSENTS_LIST
# See https://www.geeksforgeeks.org/python-pandas-dataframe-isin/ for more details on this pandas construct
filter_mask = resolved_df.type.isin(CONSENTS_LIST)
return resolved_df[filter_mask]
    def add_weekly_validation_burndown_section(self):
        """
        Creates a summary section tracking progress of retrospective consent validations

        Displays counts of the individual participants whose consents were validated with no issues detected,
        participants with unresolved consent file issues, and participants whose consents are yet to be validated
        """
        cursor = self.db_conn.cursor()
        # Gets a count of non-test/ghost participants with a participant_summary (e.g, RDR got a primary consent),
        # if the primary consent authored date was on/before the end date for this report
        sql = CONSENTED_PARTICIPANTS_COUNT_SQL + ORIGIN_SQL_FILTER
        cursor.execute(sql.format_map(SafeDict(end_date=self.end_date.strftime("%Y-%m-%d"),
                                               origin_filter=self.origin_value.lower())))
        consented_count = cursor.fetchone()[0]
        # Gets a count of non-test/ghost participants whose consents have been validated
        # (participant has entries in consent_file table), if the consent_file entry was created on/before the
        # end date for this report
        sql = VALIDATED_PARTICIPANTS_COUNT_SQL + ORIGIN_SQL_FILTER
        cursor.execute(sql.format_map(SafeDict(end_date=self.end_date.strftime("%Y-%m-%d"),
                                               origin_filter=self.origin_value.lower())))
        validated_count = cursor.fetchone()[0]
        # Pandas: Gets the number of unique participant_id values from the main (unresolved errors) dataframe
        # that was created at the start of the weekly report generation
        participants_with_errors = self.consent_df['participant_id'].nunique()
        # Derived counts: validated-with-no-issues, and consented-but-not-yet-validated participants
        participants_no_issues = validated_count - participants_with_errors
        participants_need_validation = consented_count - validated_count
        burndown_data = [
            ['DRC CONSENT VALIDATION BURNDOWN'],
            ['',
             'Total Consented Participants',
             'Participants With No Consent Issues Detected',
             'Participants With Unresolved Issues (for 1 or more consent types)',
             'Participants Not Yet Validated'],
            ['Participant Counts',
             self.format_number(consented_count),
             self.format_number(participants_no_issues),
             self.format_number(participants_with_errors),
             self.format_number(participants_need_validation)
             ]
        ]
        start_burndown_row = self.row_pos
        # NOTE(review): the end row spans one row past the data (start + len), matching the pattern
        # used elsewhere in this file — confirm intentional
        end_burndown_row= start_burndown_row + len(burndown_data)
        burndown_cell_range = self._make_a1_notation(start_burndown_row, end_col=5, end_row=end_burndown_row)
        self._add_report_rows(burndown_cell_range, burndown_data)
        # Format the burndown sub-table header and column headers
        self._add_report_formatting(self._make_a1_notation(start_burndown_row, end_col=5),
                                    self.format_specs.get('burndown_header_row'))
        self._add_report_formatting(self._make_a1_notation(start_burndown_row + 1, end_col=5),
                                    self.format_specs.get('burndown_column_headers'))
        # Format the burndown sub-table content row (first column is bolded)
        self._add_report_formatting(self._make_a1_notation(end_burndown_row - 1),
                                    self.format_specs.get('bold_small_wrapped'))
        # Inject whitespace after the validation burndown details
        self.row_pos = end_burndown_row + 3
def add_weekly_file_issue_burndown_section(self):
"""
Add a section/sub-table that tracks how many consent issues have been resolved, overall and during the
date range covered by the report. "Resolved" means consent_file entries that have been marked
OBSOLETE as their sync_status, indicating a newer file was received that has passed validation. The modified
date of an OBSOLETE entry should also indicate when the resolution occurred. Do not expect consent_file
records to be modified any more after being marked OBSOLETE.
"""
# Count of all resolved (OBSOLETE) consent files, and all of the oustanding issues (main report data in the
# self.consent_df dataframe). These dataframes were populated at the start of the report execution
total_resolved = self.resolved_df.shape[0]
still_unresolved = self.consent_df.shape[0]
# Count of OBSOLETE consent files last modified in the report date range. DATE(modified) = resolved_date
resolved_in_report_date_range = self.resolved_df.loc[(self.resolved_df.resolved_date >= self.start_date.date())\
& (self.resolved_df.resolved_date <= self.end_date.date())].shape[0]
report_range_start = self.start_date.strftime("%Y-%m-%d")
report_range_end = self.end_date.strftime("%Y-%m-%d")
# Add stats on how many consent file issues have been resolved, all time and during report date range
resolution_counts_data = [
['CONSENT FILE ISSUE RESOLUTION BURNDOWN'],
['', 'Cumulative file resolutions',
f'Resolved from {report_range_start} to {report_range_end}',
'Files pending resolution'
],
['File counts',
self.format_number(total_resolved),
self.format_number(resolved_in_report_date_range),
self.format_number(still_unresolved)
]
]
end_resolution_counts_row = self.row_pos + len(resolution_counts_data)
# Extend the resolution header row by an extra column to align with validation burndown sub-section/table
resolution_header_row = self._make_a1_notation(self.row_pos, end_col=5)
resolution_counts_header_row = self._make_a1_notation(self.row_pos+1, end_col=5)
resolution_counts_data_row = self._make_a1_notation(self.row_pos+2)
self._add_report_rows(self._make_a1_notation(self.row_pos, end_col=5, end_row=end_resolution_counts_row),
resolution_counts_data)
self._add_report_formatting(resolution_header_row, self.format_specs.get('burndown_header_row'))
self._add_report_formatting(resolution_counts_header_row,
self.format_specs.get('burndown_column_headers'))
# Format the burndown sub-table content row (first column is bolded)
self._add_report_formatting(resolution_counts_data_row,
self.format_specs.get('bold_small_wrapped'))
self.row_pos = end_resolution_counts_row + 2
def add_weekly_aggregate_outstanding_counts_section(self):
"""
Generates a summary of all outstanding issues, by consent type / participants impacted
"""
outstanding_counts_text_cell = self._make_a1_notation(self.row_pos)
self._add_report_rows(outstanding_counts_text_cell, [
['Summary of all outstanding consent issues, by consent type / participants impacted']
])
self._add_report_formatting(outstanding_counts_text_cell, self.format_specs.get('bold_text'))
# Generate the "All outstanding consent issues" summary counts
self._add_consent_issue_count_header_section(hpo='All Entities', row_pos=self.row_pos + 1)
self._add_consent_issue_counts(self.consent_df, show_all_counts=True)
self._add_report_formatting(self._make_a1_notation(self.row_pos - 1, end_col=self.sheet_cols),
self.format_specs.get('solid_thick_border'))
self.row_pos += 1
def add_weekly_recent_errors_section(self):
"""
Provide a breakdown of unresolved issues detected in the report date range, from recently authored consents
"""
start_date = self.start_date.date()
end_date = self.end_date.date()
# Created a filtered dataframe from the main unresolved errors dataframe, where the authored dates for the
# consents with unresolved issues is within the report date range
weekly_errors = self.consent_df.loc[(self.consent_df.consent_authored_date >= start_date) &\
(self.consent_df.consent_authored_date <= end_date)]
# Add the weekly consent summary details if errors exist for newly authored consents
start_date_str = start_date.strftime("%Y-%m-%d")
end_date_str = end_date.strftime("%Y-%m-%d")
if self._has_needs_correcting(weekly_errors):
# Add section description text
self.row_pos += 1
section_text_cell = self._make_a1_notation(self.row_pos)
text_str = f'Outstanding issues for consents authored between {start_date_str} and {end_date_str}' + \
f' (by HPO/Organization)'
self._add_report_rows(section_text_cell, [[text_str]])
self._add_report_formatting(section_text_cell, self.format_specs.get('bold_text'))
self._add_errors_by_org(df=weekly_errors)
else:
text_cell = self._make_a1_notation(self.row_pos)
text_str = f'No outstanding issues for recent consents authored between {start_date_str} and {end_date_str}'
self._add_report_rows(text_cell, [[text_str]])
self._add_report_formatting(text_cell, self.format_specs.get('italic_text'))
self.row_pos += 1
self.row_pos += 1
    def create_weekly_report(self, spreadsheet):
        """Build this week's report worksheet in the given gspread spreadsheet.

        Rotates out the oldest report tabs so the spreadsheet keeps at most
        self.max_weekly_reports weekly tabs, adds a new worksheet for the current
        origin/date range, writes the title and legend rows, generates each
        report section, and finally flushes the accumulated content to the sheet.

        :param spreadsheet: An opened gspread Spreadsheet object to write into.
        """
        existing_sheets = spreadsheet.worksheets()
        # Perform rolling deletion of the oldest reports so we keep a pre-defined maximum number of weekly reports
        # NOTE: this assumes all the reports in the file were generated in order, with the most recent date at the
        # leftmost tab (index 0). This deletes sheets from the existing_sheets list, starting at the rightmost tab
        # (ws_index - 1 converts the 1-based loop value into a 0-based list index)
        for ws_index in range(len(existing_sheets), self.max_weekly_reports - 1, -1):
            spreadsheet.del_worksheet(existing_sheets[ws_index - 1])
        # Add the new worksheet (to leftmost tab position / index 0)
        # NOTE(review): the comment above says index 0, but index=1 is passed below -- confirm intended tab position.
        origin_str = self.origin_value
        tab_title = f'{origin_str} {self.start_date.strftime("%Y-%m-%d")} to {self.end_date.strftime("%Y-%m-%d")}'
        self.worksheet = spreadsheet.add_worksheet(tab_title,
                                                   rows=self.sheet_rows,
                                                   cols=self.sheet_cols,
                                                   index=1)
        # Add Report title text indicating date range covered
        start_str = self.start_date.strftime("%b %-d %Y")
        end_str = self.end_date.strftime("%b %-d %Y")
        report_title_str = f'{origin_str} Consent Validation Status Report for {start_str} to {end_str}'
        title_cell = self._make_a1_notation(self.row_pos)
        self._add_report_rows(title_cell, [[report_title_str]])
        self._add_report_formatting(title_cell, self.format_specs.get('bold_text'))
        # Legend / caveat text displayed beneath the title
        self._add_text_rows(
            text_rows=[['Notes:'],
                       [f'Participant and consent counts in this sheet limited to {origin_str} participants only'],
                       ['Participants Not Yet Validated count may fluctuate due to newly consented participants ' +\
                        'whose consent files are pending validation'],
                       ['File resolutions include retransmission of files which are successfully validated, ' +\
                        'or correction of any false positive issue notifications from automated validation tools']],
            format_spec=self.format_specs.get('legend_text'),
            row_pos=self.row_pos+1)
        self.row_pos += 2
        #-- Generate main content of report --
        # Validation burndown: show how many participants have had their consent files validated, # with issues, etc.
        # File issue burndown: show how many outstanding file issues have been resolved (cumulative and in past week)
        # Aggregate outstanding counts: Breakdown of outstanding issues by consent type and participants impacted
        # Recent errors: Newly detected validation errors from recently authored consents (authored in past week)
        self.add_weekly_validation_burndown_section()
        self.add_weekly_file_issue_burndown_section()
        self.add_weekly_aggregate_outstanding_counts_section()
        self.add_weekly_recent_errors_section()
        # Inject whitespace
        self.row_pos += 2
        # Breakdown of all outstanding issues by HPO/Organization (if any issues still exist)
        if self._has_needs_correcting(self.consent_df):
            self._add_text_rows(
                text_rows=[['All Outstanding Issues including Retrospective Validations (by HPO/Organization)']],
                format_spec=self.format_specs.get('bold_text'))
            # Add the HPO/Organization breakdown of outstanding issues
            self._add_errors_by_org()
        # Flush all accumulated rows/formatting to the google sheet
        self._write_report_content()
    def execute(self):
        """Execute the WeeklyConsentReport builder.

        Connects to the RDR replica database and Google Sheets (via a GCP IAM
        service-account key), then builds one weekly report tab per participant
        origin (Vibrent, CareEvolution) in the configured spreadsheet.
        """
        _logger.info('Setting up database connection and google doc access...')
        self._connect_to_rdr_replica()
        service_key_info = gcp_get_iam_service_key_info(self.gcp_env.service_key_id)
        gs_creds = gspread.service_account(service_key_info['key_path'])
        gs_file = gs_creds.open_by_key(self.doc_id)
        # These origin strings will be converted to lowercase when used as query filter values
        for origin in ['Vibrent', 'CareEvolution']:
            self._set_origin_value(origin)
            _logger.info(f'Retrieving consent validation records for {self.origin_value}.....')
            # consent_df will contain all the outstanding NEEDS_CORRECTING issues that still need resolution
            # start_date/end_date refer to the consent authored date range; the validation end date (when the
            # consent_file records were created) is up to a day later than the consent authored end date
            self.consent_df = self._get_consent_validation_dataframe(
                self.report_sql.format_map(SafeDict(start_date=self.start_date.strftime("%Y-%m-%d"),
                                                    end_date=self.end_date.strftime("%Y-%m-%d"),
                                                    validation_end_date=self.validation_end_date.strftime("%Y-%m-%d"),
                                                    report_date=self.report_date.strftime("%Y-%m-%d"),
                                                    origin_filter=self.origin_value.lower())))
            # Workaround: filtering out results for older consents where programmatic PDF validation flagged files
            # where it couldn't find signature/signing date, even though the files looked okay on visual inspection
            self.consent_df = self.remove_potential_false_positives_for_missing_signature(self.consent_df)
            # Get all the resolved/OBSOLETE issues for generating resolution stats
            self.resolved_df = self.get_resolved_consent_issues_dataframe()
            _logger.info('Generating report data...')
            self.create_weekly_report(gs_file)
            _logger.info('Report complete')
            # Reset accumulated report state before processing the next origin
            self._clear_report()
def run():
    """Parse command-line arguments and run the selected consent report.

    Defaults to the PRODUCTION project/service account; builds either a
    DailyConsentReport or WeeklyConsentReport based on --report-type.

    Returns:
        The exit code returned by the report builder's execute() method.

    Raises:
        ValueError: If an unrecognized --report-type value is supplied.
    """
    # Set global debug value and setup application logging.
    setup_logging(
        _logger, tool_cmd, "--debug" in sys.argv, "{0}.log".format(tool_cmd) if "--log-file" in sys.argv else None
    )
    setup_i18n()
    # Setup program arguments. NOTE: This tool defaults to PRODUCTION project/service account
    parser = argparse.ArgumentParser(prog=tool_cmd, description=tool_desc)
    parser.add_argument("--debug", help="enable debug output", default=False, action="store_true")  # noqa
    parser.add_argument("--log-file", help="write output to a log file", default=False, action="store_true")  # noqa
    parser.add_argument("--project", help="gcp project name", default=RdrEnvironment.PROD.value)  # noqa
    parser.add_argument("--account", help="pmi-ops account", default=None)  # noqa
    parser.add_argument("--service-account", help="gcp iam service account",
                        default=f'configurator@{RdrEnvironment.PROD.value}.iam.gserviceaccount.com')  # noqa
    parser.add_argument("--doc-id", type=str,
                        help="A google doc ID which can override a [DAILY|WEEKLY]_CONSENT_DOC_ID env var")
    parser.add_argument("--report-type", type=str, default="daily_uploads", metavar='REPORT',
                        help="Report to generate. Default is daily_uploads")
    parser.add_argument("--report-date", type=lambda s: datetime.strptime(s, '%Y-%m-%d'),
                        help="Date of the consents (authored) in YYYY-MM-DD format. Default is yesterday's date")
    parser.add_argument("--start-date", type=lambda s: datetime.strptime(s, '%Y-%m-%d'),
                        help="Start date of range for consents (authored) in YYYY-MM-DD format. Default is 8 days ago")
    parser.add_argument("--end-date", type=lambda s: datetime.strptime(s, '%Y-%m-%d'),
                        help="End date of range for consents (authored) in YYYY-MM-DD format. Default is 1 day ago")
    parser.add_argument("--sheet-only", default=False, action="store_true",
                        help="Only generate the googlesheet report, skip generating the CSV file")
    parser.add_argument("--csv-only", default=False, action="store_true",
                        help="Only generate the CSV errors file, skip generating google sheet content")
    parser.epilog = f'Possible REPORT types: {{{",".join(REPORT_TYPES)}}}.'
    args = parser.parse_args()
    with GCPProcessContext(tool_cmd, args.project, args.account, args.service_account) as gcp_env:
        if args.report_type == 'daily_uploads':
            process = DailyConsentReport(args, gcp_env)
        elif args.report_type == 'weekly_status':
            process = WeeklyConsentReport(args, gcp_env)
        else:
            # Bug fix: was `raise(ValueError, "...")`, which raises a tuple and itself
            # fails with "TypeError: exceptions must derive from BaseException" in Python 3.
            raise ValueError("Invalid report type specified")
        exit_code = process.execute()
    return exit_code
# --- Main Program Call ---
if __name__ == "__main__":
    # Exit with the report builder's return code so callers/cron can detect failures.
    sys.exit(run())
| bsd-3-clause | 8118abee2eb2dd1da680df8ed5878c4b | 54.575921 | 120 | 0.625016 | 4.005439 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/encounter_tests.py | 1 | 28372 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import encounter
from .fhirdate import FHIRDate
class EncounterTests(unittest.TestCase):
    """Round-trip unit tests for the FHIR STU3 Encounter resource.

    Each testEncounterN case loads one canonical FHIR example JSON file,
    verifies the parsed field values, serializes the instance back to JSON,
    re-parses it, and verifies again -- proving the parse/serialize cycle is
    lossless. This module is auto-generated from the FHIR 3.0.0 definitions.
    """

    def instantiate_from(self, filename):
        """Load *filename* (relative to FHIR_UNITTEST_DATADIR, if set) and parse it as an Encounter."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("Encounter", js["resourceType"])
        return encounter.Encounter(js)

    def testEncounter1(self):
        # encounter-example-emerg.json: an emergency visit that escalated to inpatient
        inst = self.instantiate_from("encounter-example-emerg.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter1(inst)
        # Round-trip: serialize, re-parse, and re-check the same assertions
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter1(inst2)

    def implEncounter1(self, inst):
        self.assertEqual(inst.classHistory[0].class_fhir.code, "EMER")
        self.assertEqual(inst.classHistory[0].class_fhir.display, "emergency")
        self.assertEqual(inst.classHistory[0].class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.classHistory[0].period.end.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
        self.assertEqual(inst.classHistory[0].period.end.as_json(), "2017-02-01T09:27:00+10:00")
        self.assertEqual(inst.classHistory[0].period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
        self.assertEqual(inst.classHistory[0].period.start.as_json(), "2017-02-01T07:15:00+10:00")
        self.assertEqual(inst.classHistory[1].class_fhir.code, "IMP")
        self.assertEqual(inst.classHistory[1].class_fhir.display, "inpatient encounter")
        self.assertEqual(inst.classHistory[1].class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.classHistory[1].period.start.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
        self.assertEqual(inst.classHistory[1].period.start.as_json(), "2017-02-01T09:27:00+10:00")
        self.assertEqual(inst.class_fhir.code, "IMP")
        self.assertEqual(inst.class_fhir.display, "inpatient encounter")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "emd")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "From accident/emergency department")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://hl7.org/fhir/admit-source")
        self.assertEqual(inst.id, "emerg")
        self.assertEqual(inst.location[0].period.end.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
        self.assertEqual(inst.location[0].period.end.as_json(), "2017-02-01T08:45:00+10:00")
        self.assertEqual(inst.location[0].period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
        self.assertEqual(inst.location[0].period.start.as_json(), "2017-02-01T07:15:00+10:00")
        self.assertEqual(inst.location[0].status, "active")
        self.assertEqual(inst.location[1].period.end.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
        self.assertEqual(inst.location[1].period.end.as_json(), "2017-02-01T09:27:00+10:00")
        self.assertEqual(inst.location[1].period.start.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
        self.assertEqual(inst.location[1].period.start.as_json(), "2017-02-01T08:45:00+10:00")
        self.assertEqual(inst.location[1].status, "active")
        self.assertEqual(inst.location[2].period.end.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
        self.assertEqual(inst.location[2].period.end.as_json(), "2017-02-01T12:15:00+10:00")
        self.assertEqual(inst.location[2].period.start.date, FHIRDate("2017-02-01T09:27:00+10:00").date)
        self.assertEqual(inst.location[2].period.start.as_json(), "2017-02-01T09:27:00+10:00")
        self.assertEqual(inst.location[2].status, "active")
        self.assertEqual(inst.location[3].period.end.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
        self.assertEqual(inst.location[3].period.end.as_json(), "2017-02-01T12:45:00+10:00")
        self.assertEqual(inst.location[3].period.start.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
        self.assertEqual(inst.location[3].period.start.as_json(), "2017-02-01T12:15:00+10:00")
        self.assertEqual(inst.location[3].status, "reserved")
        self.assertEqual(inst.location[4].period.start.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
        self.assertEqual(inst.location[4].period.start.as_json(), "2017-02-01T12:45:00+10:00")
        self.assertEqual(inst.location[4].status, "active")
        self.assertEqual(inst.period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
        self.assertEqual(inst.period.start.as_json(), "2017-02-01T07:15:00+10:00")
        self.assertEqual(inst.status, "in-progress")
        self.assertEqual(inst.statusHistory[0].period.end.date, FHIRDate("2017-02-01T07:35:00+10:00").date)
        self.assertEqual(inst.statusHistory[0].period.end.as_json(), "2017-02-01T07:35:00+10:00")
        self.assertEqual(inst.statusHistory[0].period.start.date, FHIRDate("2017-02-01T07:15:00+10:00").date)
        self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2017-02-01T07:15:00+10:00")
        self.assertEqual(inst.statusHistory[0].status, "arrived")
        self.assertEqual(inst.statusHistory[1].period.end.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
        self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2017-02-01T08:45:00+10:00")
        self.assertEqual(inst.statusHistory[1].period.start.date, FHIRDate("2017-02-01T07:35:00+10:00").date)
        self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2017-02-01T07:35:00+10:00")
        self.assertEqual(inst.statusHistory[1].status, "triaged")
        self.assertEqual(inst.statusHistory[2].period.end.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
        self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2017-02-01T12:15:00+10:00")
        self.assertEqual(inst.statusHistory[2].period.start.date, FHIRDate("2017-02-01T08:45:00+10:00").date)
        self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2017-02-01T08:45:00+10:00")
        self.assertEqual(inst.statusHistory[2].status, "in-progress")
        self.assertEqual(inst.statusHistory[3].period.end.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
        self.assertEqual(inst.statusHistory[3].period.end.as_json(), "2017-02-01T12:45:00+10:00")
        self.assertEqual(inst.statusHistory[3].period.start.date, FHIRDate("2017-02-01T12:15:00+10:00").date)
        self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2017-02-01T12:15:00+10:00")
        self.assertEqual(inst.statusHistory[3].status, "onleave")
        self.assertEqual(inst.statusHistory[4].period.start.date, FHIRDate("2017-02-01T12:45:00+10:00").date)
        self.assertEqual(inst.statusHistory[4].period.start.as_json(), "2017-02-01T12:45:00+10:00")
        self.assertEqual(inst.statusHistory[4].status, "in-progress")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Emergency visit that escalated into inpatient patient @example</div>")
        self.assertEqual(inst.text.status, "generated")

    def testEncounter2(self):
        # encounter-example-f001-heart.json: ambulatory heart-valve-replacement encounter
        inst = self.instantiate_from("encounter-example-f001-heart.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter2(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter2(inst2)

    def implEncounter2(self, inst):
        self.assertEqual(inst.class_fhir.code, "AMB")
        self.assertEqual(inst.class_fhir.display, "ambulatory")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "305956004")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Referral by physician")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].code, "306689006")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].display, "Discharge to home")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.system, "http://www.amc.nl/zorgportal/identifiers/pre-admissions")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.use, "official")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.value, "93042")
        self.assertEqual(inst.id, "f001")
        self.assertEqual(inst.identifier[0].system, "http://www.amc.nl/zorgportal/identifiers/visits")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "v1451")
        self.assertEqual(inst.length.code, "min")
        self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.length.unit, "min")
        self.assertEqual(inst.length.value, 140)
        self.assertEqual(inst.priority.coding[0].code, "310361003")
        self.assertEqual(inst.priority.coding[0].display, "Non-urgent cardiological admission")
        self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].coding[0].code, "34068001")
        self.assertEqual(inst.reason[0].coding[0].display, "Heart valve replacement")
        self.assertEqual(inst.reason[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "270427003")
        self.assertEqual(inst.type[0].coding[0].display, "Patient-initiated encounter")
        self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")

    def testEncounter3(self):
        # encounter-example-f002-lung.json: ambulatory partial-lobectomy encounter
        inst = self.instantiate_from("encounter-example-f002-lung.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter3(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter3(inst2)

    def implEncounter3(self, inst):
        self.assertEqual(inst.class_fhir.code, "AMB")
        self.assertEqual(inst.class_fhir.display, "ambulatory")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "305997006")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Referral by radiologist")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].code, "306689006")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].display, "Discharge to home")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.system, "http://www.bmc.nl/zorgportal/identifiers/pre-admissions")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.use, "official")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.value, "98682")
        self.assertEqual(inst.id, "f002")
        self.assertEqual(inst.identifier[0].system, "http://www.bmc.nl/zorgportal/identifiers/encounters")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "v3251")
        self.assertEqual(inst.length.code, "min")
        self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.length.unit, "min")
        self.assertEqual(inst.length.value, 140)
        self.assertEqual(inst.priority.coding[0].code, "103391001")
        self.assertEqual(inst.priority.coding[0].display, "Urgent")
        self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].coding[0].code, "34068001")
        self.assertEqual(inst.reason[0].coding[0].display, "Partial lobectomy of lung")
        self.assertEqual(inst.reason[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "270427003")
        self.assertEqual(inst.type[0].coding[0].display, "Patient-initiated encounter")
        self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")

    def testEncounter4(self):
        # encounter-example-f003-abscess.json: ambulatory retropharyngeal-abscess encounter
        inst = self.instantiate_from("encounter-example-f003-abscess.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter4(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter4(inst2)

    def implEncounter4(self, inst):
        self.assertEqual(inst.class_fhir.code, "AMB")
        self.assertEqual(inst.class_fhir.display, "ambulatory")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "305956004")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Referral by physician")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].code, "306689006")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].display, "Discharge to home")
        self.assertEqual(inst.hospitalization.dischargeDisposition.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.system, "http://www.bmc.nl/zorgportal/identifiers/pre-admissions")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.use, "official")
        self.assertEqual(inst.hospitalization.preAdmissionIdentifier.value, "93042")
        self.assertEqual(inst.id, "f003")
        self.assertEqual(inst.identifier[0].system, "http://www.bmc.nl/zorgportal/identifiers/encounters")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "v6751")
        self.assertEqual(inst.length.code, "min")
        self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.length.unit, "min")
        self.assertEqual(inst.length.value, 90)
        self.assertEqual(inst.priority.coding[0].code, "103391001")
        self.assertEqual(inst.priority.coding[0].display, "Non-urgent ear, nose and throat admission")
        self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].coding[0].code, "18099001")
        self.assertEqual(inst.reason[0].coding[0].display, "Retropharyngeal abscess")
        self.assertEqual(inst.reason[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].extension[0].url, "http://hl7.org/fhir/StructureDefinition/encounter-primaryDiagnosis")
        self.assertEqual(inst.reason[0].extension[0].valuePositiveInt, 1)
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "270427003")
        self.assertEqual(inst.type[0].coding[0].display, "Patient-initiated encounter")
        self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")

    def testEncounter5(self):
        # encounter-example-f201-20130404.json: ambulatory consultation for fever peaks
        inst = self.instantiate_from("encounter-example-f201-20130404.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter5(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter5(inst2)

    def implEncounter5(self, inst):
        self.assertEqual(inst.class_fhir.code, "AMB")
        self.assertEqual(inst.class_fhir.display, "ambulatory")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.id, "f201")
        self.assertEqual(inst.identifier[0].use, "temp")
        self.assertEqual(inst.identifier[0].value, "Encounter_Roel_20130404")
        self.assertEqual(inst.priority.coding[0].code, "17621005")
        self.assertEqual(inst.priority.coding[0].display, "Normal")
        self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].text, "The patient had fever peaks over the last couple of days. He is worried about these peaks.")
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "11429006")
        self.assertEqual(inst.type[0].coding[0].display, "Consultation")
        self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")

    def testEncounter6(self):
        # encounter-example-f202-20130128.json: ambulatory chemotherapy encounter
        inst = self.instantiate_from("encounter-example-f202-20130128.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter6(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter6(inst2)

    def implEncounter6(self, inst):
        self.assertEqual(inst.class_fhir.code, "AMB")
        self.assertEqual(inst.class_fhir.display, "ambulatory")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.diagnosis[0].rank, 1)
        self.assertEqual(inst.diagnosis[0].role.coding[0].code, "AD")
        self.assertEqual(inst.diagnosis[0].role.coding[0].display, "Admission diagnosis")
        self.assertEqual(inst.diagnosis[0].role.coding[0].system, "http://hl7.org/fhir/diagnosis-role")
        self.assertEqual(inst.id, "f202")
        self.assertEqual(inst.identifier[0].use, "temp")
        self.assertEqual(inst.identifier[0].value, "Encounter_Roel_20130128")
        self.assertEqual(inst.length.code, "min")
        self.assertEqual(inst.length.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.length.unit, "minutes")
        self.assertEqual(inst.length.value, 56)
        self.assertEqual(inst.priority.coding[0].code, "103391001")
        self.assertEqual(inst.priority.coding[0].display, "Urgent")
        self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].extension[0].url, "http://hl7.org/fhir/StructureDefinition/encounter-primaryDiagnosis")
        self.assertEqual(inst.reason[0].extension[0].valuePositiveInt, 2)
        self.assertEqual(inst.reason[0].text, "The patient is treated for a tumor.")
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "367336001")
        self.assertEqual(inst.type[0].coding[0].display, "Chemotherapy")
        self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")

    def testEncounter7(self):
        # encounter-example-f203-20130311.json: nine-day inpatient stay with hospitalization details
        inst = self.instantiate_from("encounter-example-f203-20130311.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter7(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter7(inst2)

    def implEncounter7(self, inst):
        self.assertEqual(inst.class_fhir.code, "IMP")
        self.assertEqual(inst.class_fhir.display, "inpatient encounter")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.diagnosis[0].rank, 1)
        self.assertEqual(inst.diagnosis[0].role.coding[0].code, "AD")
        self.assertEqual(inst.diagnosis[0].role.coding[0].display, "Admission diagnosis")
        self.assertEqual(inst.diagnosis[0].role.coding[0].system, "http://hl7.org/fhir/diagnosis-role")
        self.assertEqual(inst.diagnosis[1].role.coding[0].code, "DD")
        self.assertEqual(inst.diagnosis[1].role.coding[0].display, "Discharge diagnosis")
        self.assertEqual(inst.diagnosis[1].role.coding[0].system, "http://hl7.org/fhir/diagnosis-role")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].code, "309902002")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].display, "Clinical Oncology Department")
        self.assertEqual(inst.hospitalization.admitSource.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.dietPreference[0].coding[0].code, "276026009")
        self.assertEqual(inst.hospitalization.dietPreference[0].coding[0].display, "Fluid balance regulation")
        self.assertEqual(inst.hospitalization.dietPreference[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.hospitalization.reAdmission.coding[0].display, "readmitted")
        self.assertEqual(inst.hospitalization.specialArrangement[0].coding[0].code, "wheel")
        self.assertEqual(inst.hospitalization.specialArrangement[0].coding[0].display, "Wheelchair")
        self.assertEqual(inst.hospitalization.specialArrangement[0].coding[0].system, "http://hl7.org/fhir/encounter-special-arrangements")
        self.assertEqual(inst.hospitalization.specialCourtesy[0].coding[0].code, "NRM")
        self.assertEqual(inst.hospitalization.specialCourtesy[0].coding[0].display, "normal courtesy")
        self.assertEqual(inst.hospitalization.specialCourtesy[0].coding[0].system, "http://hl7.org/fhir/v3/EncounterSpecialCourtesy")
        self.assertEqual(inst.id, "f203")
        self.assertEqual(inst.identifier[0].use, "temp")
        self.assertEqual(inst.identifier[0].value, "Encounter_Roel_20130311")
        self.assertEqual(inst.participant[0].type[0].coding[0].code, "PART")
        self.assertEqual(inst.participant[0].type[0].coding[0].system, "http://hl7.org/fhir/v3/ParticipationType")
        self.assertEqual(inst.period.end.date, FHIRDate("2013-03-20").date)
        self.assertEqual(inst.period.end.as_json(), "2013-03-20")
        self.assertEqual(inst.period.start.date, FHIRDate("2013-03-11").date)
        self.assertEqual(inst.period.start.as_json(), "2013-03-11")
        self.assertEqual(inst.priority.coding[0].code, "394849002")
        self.assertEqual(inst.priority.coding[0].display, "High priority")
        self.assertEqual(inst.priority.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].text, "The patient seems to suffer from bilateral pneumonia and renal insufficiency, most likely due to chemotherapy.")
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.statusHistory[0].period.start.date, FHIRDate("2013-03-08").date)
        self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2013-03-08")
        self.assertEqual(inst.statusHistory[0].status, "arrived")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type[0].coding[0].code, "183807002")
        self.assertEqual(inst.type[0].coding[0].display, "Inpatient stay for nine days")
        self.assertEqual(inst.type[0].coding[0].system, "http://snomed.info/sct")

    def testEncounter8(self):
        # encounter-example-home.json: home-health encounter with a contained location
        inst = self.instantiate_from("encounter-example-home.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter8(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter8(inst2)

    def implEncounter8(self, inst):
        self.assertEqual(inst.class_fhir.code, "HH")
        self.assertEqual(inst.class_fhir.display, "home health")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.contained[0].id, "home")
        self.assertEqual(inst.id, "home")
        self.assertEqual(inst.location[0].period.end.date, FHIRDate("2015-01-17T16:30:00+10:00").date)
        self.assertEqual(inst.location[0].period.end.as_json(), "2015-01-17T16:30:00+10:00")
        self.assertEqual(inst.location[0].period.start.date, FHIRDate("2015-01-17T16:00:00+10:00").date)
        self.assertEqual(inst.location[0].period.start.as_json(), "2015-01-17T16:00:00+10:00")
        self.assertEqual(inst.location[0].status, "completed")
        self.assertEqual(inst.participant[0].period.end.date, FHIRDate("2015-01-17T16:30:00+10:00").date)
        self.assertEqual(inst.participant[0].period.end.as_json(), "2015-01-17T16:30:00+10:00")
        self.assertEqual(inst.participant[0].period.start.date, FHIRDate("2015-01-17T16:00:00+10:00").date)
        self.assertEqual(inst.participant[0].period.start.as_json(), "2015-01-17T16:00:00+10:00")
        self.assertEqual(inst.period.end.date, FHIRDate("2015-01-17T16:30:00+10:00").date)
        self.assertEqual(inst.period.end.as_json(), "2015-01-17T16:30:00+10:00")
        self.assertEqual(inst.period.start.date, FHIRDate("2015-01-17T16:00:00+10:00").date)
        self.assertEqual(inst.period.start.as_json(), "2015-01-17T16:00:00+10:00")
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Encounter with patient @example who is at home</div>")
        self.assertEqual(inst.text.status, "generated")

    def testEncounter9(self):
        # encounter-example-xcda.json: XDS/XCA connectathon example
        inst = self.instantiate_from("encounter-example-xcda.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter9(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter9(inst2)

    def implEncounter9(self, inst):
        self.assertEqual(inst.class_fhir.code, "AMB")
        self.assertEqual(inst.class_fhir.display, "ambulatory")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.id, "xcda")
        self.assertEqual(inst.identifier[0].system, "http://healthcare.example.org/identifiers/enocunter")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "1234213.52345873")
        self.assertEqual(inst.reason[0].coding[0].code, "T-D8200")
        self.assertEqual(inst.reason[0].coding[0].display, "Arm")
        self.assertEqual(inst.reason[0].coding[0].system, "http://ihe.net/xds/connectathon/eventCodes")
        self.assertEqual(inst.status, "finished")
        self.assertEqual(inst.text.status, "generated")

    def testEncounter10(self):
        # encounter-example.json: minimal in-progress inpatient encounter
        inst = self.instantiate_from("encounter-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Encounter instance")
        self.implEncounter10(inst)
        js = inst.as_json()
        self.assertEqual("Encounter", js["resourceType"])
        inst2 = encounter.Encounter(js)
        self.implEncounter10(inst2)

    def implEncounter10(self, inst):
        self.assertEqual(inst.class_fhir.code, "IMP")
        self.assertEqual(inst.class_fhir.display, "inpatient encounter")
        self.assertEqual(inst.class_fhir.system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.status, "in-progress")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Encounter with patient @example</div>")
        self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | a8763fc5cf86ed09e7495947918f4cf1 | 64.524249 | 159 | 0.695615 | 3.178579 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/clinicalimpression.py | 1 | 10611 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ClinicalImpression) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
# NOTE: generated code (see file header: "Generated from FHIR 3.0.0.11832");
# field set and ordering mirror the FHIR ClinicalImpression StructureDefinition.
class ClinicalImpression(domainresource.DomainResource):
    """ A clinical assessment performed when planning treatments and management
    strategies for a patient.
    A record of a clinical assessment performed to determine what problem(s)
    may affect the patient and before planning the treatments or management
    strategies that are best to manage a patient's condition. Assessments are
    often 1:1 with a clinical consultation / encounter, but this varies
    greatly depending on the clinical workflow. This resource is called
    "ClinicalImpression" rather than "ClinicalAssessment" to avoid confusion
    with the recording of assessment tools such as Apgar score.
    """
    resource_type = "ClinicalImpression"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.action = None
        """ Action taken as part of assessment procedure.
        List of `FHIRReference` items referencing `ReferralRequest, ProcedureRequest, Procedure, MedicationRequest, Appointment` (represented as `dict` in JSON). """
        self.assessor = None
        """ The clinician performing the assessment.
        Type `FHIRReference` referencing `Practitioner` (represented as `dict` in JSON). """
        self.code = None
        """ Kind of assessment performed.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.context = None
        """ Encounter or Episode created from.
        Type `FHIRReference` referencing `Encounter, EpisodeOfCare` (represented as `dict` in JSON). """
        self.date = None
        """ When the assessment was documented.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.description = None
        """ Why/how the assessment was performed.
        Type `str`. """
        self.effectiveDateTime = None
        """ Time of assessment.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.effectivePeriod = None
        """ Time of assessment.
        Type `Period` (represented as `dict` in JSON). """
        self.finding = None
        """ Possible or likely findings and diagnoses.
        List of `ClinicalImpressionFinding` items (represented as `dict` in JSON). """
        self.identifier = None
        """ Business identifier.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.investigation = None
        """ One or more sets of investigations (signs, symptions, etc.).
        List of `ClinicalImpressionInvestigation` items (represented as `dict` in JSON). """
        self.note = None
        """ Comments made about the ClinicalImpression.
        List of `Annotation` items (represented as `dict` in JSON). """
        self.previous = None
        """ Reference to last assessment.
        Type `FHIRReference` referencing `ClinicalImpression` (represented as `dict` in JSON). """
        self.problem = None
        """ Relevant impressions of patient state.
        List of `FHIRReference` items referencing `Condition, AllergyIntolerance` (represented as `dict` in JSON). """
        self.prognosisCodeableConcept = None
        """ Estimate of likely outcome.
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.prognosisReference = None
        """ RiskAssessment expressing likely outcome.
        List of `FHIRReference` items referencing `RiskAssessment` (represented as `dict` in JSON). """
        self.protocol = None
        """ Clinical Protocol followed.
        List of `str` items. """
        self.status = None
        """ draft | completed | entered-in-error.
        Type `str`. """
        self.subject = None
        """ Patient or group assessed.
        Type `FHIRReference` referencing `Patient, Group` (represented as `dict` in JSON). """
        self.summary = None
        """ Summary of the assessment.
        Type `str`. """
        super(ClinicalImpression, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(ClinicalImpression, self).elementProperties()
        # Each tuple: (attribute name, JSON name, type, is-list, choice-group,
        # required). The choice-group ("effective") ties the value[x] variants
        # effectiveDateTime/effectivePeriod together; status and subject carry
        # True as the last element — presumably the mandatory-element flag from
        # the FHIR spec (TODO confirm against fhirclient's FHIRAbstractBase).
        js.extend([
            ("action", "action", fhirreference.FHIRReference, True, None, False),
            ("assessor", "assessor", fhirreference.FHIRReference, False, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("context", "context", fhirreference.FHIRReference, False, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, False),
            ("description", "description", str, False, None, False),
            ("effectiveDateTime", "effectiveDateTime", fhirdate.FHIRDate, False, "effective", False),
            ("effectivePeriod", "effectivePeriod", period.Period, False, "effective", False),
            ("finding", "finding", ClinicalImpressionFinding, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("investigation", "investigation", ClinicalImpressionInvestigation, True, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("previous", "previous", fhirreference.FHIRReference, False, None, False),
            ("problem", "problem", fhirreference.FHIRReference, True, None, False),
            ("prognosisCodeableConcept", "prognosisCodeableConcept", codeableconcept.CodeableConcept, True, None, False),
            ("prognosisReference", "prognosisReference", fhirreference.FHIRReference, True, None, False),
            ("protocol", "protocol", str, True, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
            ("summary", "summary", str, False, None, False),
        ])
        return js
from . import backboneelement
class ClinicalImpressionFinding(backboneelement.BackboneElement):
    """Possible or likely findings and diagnoses.

    Specific findings or diagnoses that were considered likely or relevant
    to ongoing treatment.
    """
    resource_type = "ClinicalImpressionFinding"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Which investigations support the finding (`str`).
        self.basis = None
        # What was found, as a `CodeableConcept` (dict in JSON); forms the
        # "item[x]" choice pair together with itemReference.
        self.itemCodeableConcept = None
        # What was found, as a `FHIRReference` to `Condition` or `Observation`
        # (dict in JSON).
        self.itemReference = None
        super(ClinicalImpressionFinding, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super(ClinicalImpressionFinding, self).elementProperties()
        properties += [
            ("basis", "basis", str, False, None, False),
            ("itemCodeableConcept", "itemCodeableConcept", codeableconcept.CodeableConcept, False, "item", True),
            ("itemReference", "itemReference", fhirreference.FHIRReference, False, "item", True),
        ]
        return properties
class ClinicalImpressionInvestigation(backboneelement.BackboneElement):
    """One or more sets of investigations (signs, symptoms, etc.).

    The actual grouping of investigations varies greatly depending on the
    type and context of the assessment. These investigations may include
    data generated during the assessment process, or data previously
    generated and recorded that is pertinent to the outcomes.
    """
    resource_type = "ClinicalImpressionInvestigation"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # A name/code for the set, as a `CodeableConcept` (dict in JSON).
        self.code = None
        # Records of specific investigations: list of `FHIRReference` items
        # referencing `Observation, QuestionnaireResponse, FamilyMemberHistory,
        # DiagnosticReport, RiskAssessment, ImagingStudy` (dicts in JSON).
        self.item = None
        super(ClinicalImpressionInvestigation, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super(ClinicalImpressionInvestigation, self).elementProperties()
        properties += [
            ("code", "code", codeableconcept.CodeableConcept, False, None, True),
            ("item", "item", fhirreference.FHIRReference, True, None, False),
        ]
        return properties
# Sibling model modules are imported at the bottom of the file. If the
# relative import fails, fall back to the module object already registered
# in sys.modules — presumably to tolerate circular imports among the
# generated model modules (TODO confirm; pattern is emitted by the
# fhir-parser generator).
import sys
try:
    from . import annotation
except ImportError:
    annotation = sys.modules[__package__ + '.annotation']
try:
    from . import codeableconcept
except ImportError:
    codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
    from . import fhirdate
except ImportError:
    fhirdate = sys.modules[__package__ + '.fhirdate']
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']
try:
    from . import identifier
except ImportError:
    identifier = sys.modules[__package__ + '.identifier']
try:
    from . import period
except ImportError:
    period = sys.modules[__package__ + '.period']
| bsd-3-clause | d0646cd822bf9328ef3396db87a81805 | 41.78629 | 192 | 0.640844 | 4.283811 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/elementdefinition.py | 1 | 62683 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ElementDefinition) on 2017-03-22.
# 2017, SMART Health IT.
from . import element
class ElementDefinition(element.Element):
""" Definition of an element in a resource or extension.
Captures constraints on each element within the resource, profile, or
extension.
"""
resource_type = "ElementDefinition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.alias = None
""" Other names.
List of `str` items. """
self.base = None
""" Base definition information for tools.
Type `ElementDefinitionBase` (represented as `dict` in JSON). """
self.binding = None
""" ValueSet details if this is coded.
Type `ElementDefinitionBinding` (represented as `dict` in JSON). """
self.code = None
""" Corresponding codes in terminologies.
List of `Coding` items (represented as `dict` in JSON). """
self.comment = None
""" Comments about the use of this element.
Type `str`. """
self.condition = None
""" Reference to invariant about presence.
List of `str` items. """
self.constraint = None
""" Condition that must evaluate to true.
List of `ElementDefinitionConstraint` items (represented as `dict` in JSON). """
self.contentReference = None
""" Reference to definition of content for the element.
Type `str`. """
self.defaultValueAddress = None
""" Specified value if missing from instance.
Type `Address` (represented as `dict` in JSON). """
self.defaultValueAge = None
""" Specified value if missing from instance.
Type `Age` (represented as `dict` in JSON). """
self.defaultValueAnnotation = None
""" Specified value if missing from instance.
Type `Annotation` (represented as `dict` in JSON). """
self.defaultValueAttachment = None
""" Specified value if missing from instance.
Type `Attachment` (represented as `dict` in JSON). """
self.defaultValueBase64Binary = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueBoolean = None
""" Specified value if missing from instance.
Type `bool`. """
self.defaultValueCode = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueCodeableConcept = None
""" Specified value if missing from instance.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.defaultValueCoding = None
""" Specified value if missing from instance.
Type `Coding` (represented as `dict` in JSON). """
self.defaultValueContactPoint = None
""" Specified value if missing from instance.
Type `ContactPoint` (represented as `dict` in JSON). """
self.defaultValueCount = None
""" Specified value if missing from instance.
Type `Count` (represented as `dict` in JSON). """
self.defaultValueDate = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDateTime = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDecimal = None
""" Specified value if missing from instance.
Type `float`. """
self.defaultValueDistance = None
""" Specified value if missing from instance.
Type `Distance` (represented as `dict` in JSON). """
self.defaultValueDuration = None
""" Specified value if missing from instance.
Type `Duration` (represented as `dict` in JSON). """
self.defaultValueHumanName = None
""" Specified value if missing from instance.
Type `HumanName` (represented as `dict` in JSON). """
self.defaultValueId = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueIdentifier = None
""" Specified value if missing from instance.
Type `Identifier` (represented as `dict` in JSON). """
self.defaultValueInstant = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueInteger = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueMarkdown = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueMeta = None
""" Specified value if missing from instance.
Type `Meta` (represented as `dict` in JSON). """
self.defaultValueMoney = None
""" Specified value if missing from instance.
Type `Money` (represented as `dict` in JSON). """
self.defaultValueOid = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValuePeriod = None
""" Specified value if missing from instance.
Type `Period` (represented as `dict` in JSON). """
self.defaultValuePositiveInt = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueQuantity = None
""" Specified value if missing from instance.
Type `Quantity` (represented as `dict` in JSON). """
self.defaultValueRange = None
""" Specified value if missing from instance.
Type `Range` (represented as `dict` in JSON). """
self.defaultValueRatio = None
""" Specified value if missing from instance.
Type `Ratio` (represented as `dict` in JSON). """
self.defaultValueReference = None
""" Specified value if missing from instance.
Type `FHIRReference` (represented as `dict` in JSON). """
self.defaultValueSampledData = None
""" Specified value if missing from instance.
Type `SampledData` (represented as `dict` in JSON). """
self.defaultValueSignature = None
""" Specified value if missing from instance.
Type `Signature` (represented as `dict` in JSON). """
self.defaultValueString = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueTime = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueTiming = None
""" Specified value if missing from instance.
Type `Timing` (represented as `dict` in JSON). """
self.defaultValueUnsignedInt = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueUri = None
""" Specified value if missing from instance.
Type `str`. """
self.definition = None
""" Full formal definition as narrative text.
Type `str`. """
self.example = None
""" Example value (as defined for type).
List of `ElementDefinitionExample` items (represented as `dict` in JSON). """
self.fixedAddress = None
""" Value must be exactly this.
Type `Address` (represented as `dict` in JSON). """
self.fixedAge = None
""" Value must be exactly this.
Type `Age` (represented as `dict` in JSON). """
self.fixedAnnotation = None
""" Value must be exactly this.
Type `Annotation` (represented as `dict` in JSON). """
self.fixedAttachment = None
""" Value must be exactly this.
Type `Attachment` (represented as `dict` in JSON). """
self.fixedBase64Binary = None
""" Value must be exactly this.
Type `str`. """
self.fixedBoolean = None
""" Value must be exactly this.
Type `bool`. """
self.fixedCode = None
""" Value must be exactly this.
Type `str`. """
self.fixedCodeableConcept = None
""" Value must be exactly this.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.fixedCoding = None
""" Value must be exactly this.
Type `Coding` (represented as `dict` in JSON). """
self.fixedContactPoint = None
""" Value must be exactly this.
Type `ContactPoint` (represented as `dict` in JSON). """
self.fixedCount = None
""" Value must be exactly this.
Type `Count` (represented as `dict` in JSON). """
self.fixedDate = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDateTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDecimal = None
""" Value must be exactly this.
Type `float`. """
self.fixedDistance = None
""" Value must be exactly this.
Type `Distance` (represented as `dict` in JSON). """
self.fixedDuration = None
""" Value must be exactly this.
Type `Duration` (represented as `dict` in JSON). """
self.fixedHumanName = None
""" Value must be exactly this.
Type `HumanName` (represented as `dict` in JSON). """
self.fixedId = None
""" Value must be exactly this.
Type `str`. """
self.fixedIdentifier = None
""" Value must be exactly this.
Type `Identifier` (represented as `dict` in JSON). """
self.fixedInstant = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedInteger = None
""" Value must be exactly this.
Type `int`. """
self.fixedMarkdown = None
""" Value must be exactly this.
Type `str`. """
self.fixedMeta = None
""" Value must be exactly this.
Type `Meta` (represented as `dict` in JSON). """
self.fixedMoney = None
""" Value must be exactly this.
Type `Money` (represented as `dict` in JSON). """
self.fixedOid = None
""" Value must be exactly this.
Type `str`. """
self.fixedPeriod = None
""" Value must be exactly this.
Type `Period` (represented as `dict` in JSON). """
self.fixedPositiveInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedQuantity = None
""" Value must be exactly this.
Type `Quantity` (represented as `dict` in JSON). """
self.fixedRange = None
""" Value must be exactly this.
Type `Range` (represented as `dict` in JSON). """
self.fixedRatio = None
""" Value must be exactly this.
Type `Ratio` (represented as `dict` in JSON). """
self.fixedReference = None
""" Value must be exactly this.
Type `FHIRReference` (represented as `dict` in JSON). """
self.fixedSampledData = None
""" Value must be exactly this.
Type `SampledData` (represented as `dict` in JSON). """
self.fixedSignature = None
""" Value must be exactly this.
Type `Signature` (represented as `dict` in JSON). """
self.fixedString = None
""" Value must be exactly this.
Type `str`. """
self.fixedTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedTiming = None
""" Value must be exactly this.
Type `Timing` (represented as `dict` in JSON). """
self.fixedUnsignedInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedUri = None
""" Value must be exactly this.
Type `str`. """
self.isModifier = None
""" If this modifies the meaning of other elements.
Type `bool`. """
self.isSummary = None
""" Include when _summary = true?.
Type `bool`. """
self.label = None
""" Name for element to display with or prompt for element.
Type `str`. """
self.mapping = None
""" Map element to another set of definitions.
List of `ElementDefinitionMapping` items (represented as `dict` in JSON). """
self.max = None
""" Maximum Cardinality (a number or *).
Type `str`. """
self.maxLength = None
""" Max length for strings.
Type `int`. """
self.maxValueDate = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDateTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDecimal = None
""" Maximum Allowed Value (for some types).
Type `float`. """
self.maxValueInstant = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueInteger = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValuePositiveInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValueQuantity = None
""" Maximum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.maxValueTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueUnsignedInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.meaningWhenMissing = None
""" Implicit meaning when this element is missing.
Type `str`. """
self.min = None
""" Minimum Cardinality.
Type `int`. """
self.minValueDate = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDateTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDecimal = None
""" Minimum Allowed Value (for some types).
Type `float`. """
self.minValueInstant = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueInteger = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValuePositiveInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValueQuantity = None
""" Minimum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.minValueTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueUnsignedInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.mustSupport = None
""" If the element must supported.
Type `bool`. """
self.orderMeaning = None
""" What the order of the elements means.
Type `str`. """
self.path = None
""" Path of the element in the hierarchy of elements.
Type `str`. """
self.patternAddress = None
""" Value must have at least these property values.
Type `Address` (represented as `dict` in JSON). """
self.patternAge = None
""" Value must have at least these property values.
Type `Age` (represented as `dict` in JSON). """
self.patternAnnotation = None
""" Value must have at least these property values.
Type `Annotation` (represented as `dict` in JSON). """
self.patternAttachment = None
""" Value must have at least these property values.
Type `Attachment` (represented as `dict` in JSON). """
self.patternBase64Binary = None
""" Value must have at least these property values.
Type `str`. """
self.patternBoolean = None
""" Value must have at least these property values.
Type `bool`. """
self.patternCode = None
""" Value must have at least these property values.
Type `str`. """
self.patternCodeableConcept = None
""" Value must have at least these property values.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.patternCoding = None
""" Value must have at least these property values.
Type `Coding` (represented as `dict` in JSON). """
self.patternContactPoint = None
""" Value must have at least these property values.
Type `ContactPoint` (represented as `dict` in JSON). """
self.patternCount = None
""" Value must have at least these property values.
Type `Count` (represented as `dict` in JSON). """
self.patternDate = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDateTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDecimal = None
""" Value must have at least these property values.
Type `float`. """
self.patternDistance = None
""" Value must have at least these property values.
Type `Distance` (represented as `dict` in JSON). """
self.patternDuration = None
""" Value must have at least these property values.
Type `Duration` (represented as `dict` in JSON). """
self.patternHumanName = None
""" Value must have at least these property values.
Type `HumanName` (represented as `dict` in JSON). """
self.patternId = None
""" Value must have at least these property values.
Type `str`. """
self.patternIdentifier = None
""" Value must have at least these property values.
Type `Identifier` (represented as `dict` in JSON). """
self.patternInstant = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternInteger = None
""" Value must have at least these property values.
Type `int`. """
self.patternMarkdown = None
""" Value must have at least these property values.
Type `str`. """
self.patternMeta = None
""" Value must have at least these property values.
Type `Meta` (represented as `dict` in JSON). """
self.patternMoney = None
""" Value must have at least these property values.
Type `Money` (represented as `dict` in JSON). """
self.patternOid = None
""" Value must have at least these property values.
Type `str`. """
self.patternPeriod = None
""" Value must have at least these property values.
Type `Period` (represented as `dict` in JSON). """
self.patternPositiveInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternQuantity = None
""" Value must have at least these property values.
Type `Quantity` (represented as `dict` in JSON). """
self.patternRange = None
""" Value must have at least these property values.
Type `Range` (represented as `dict` in JSON). """
self.patternRatio = None
""" Value must have at least these property values.
Type `Ratio` (represented as `dict` in JSON). """
self.patternReference = None
""" Value must have at least these property values.
Type `FHIRReference` (represented as `dict` in JSON). """
self.patternSampledData = None
""" Value must have at least these property values.
Type `SampledData` (represented as `dict` in JSON). """
self.patternSignature = None
""" Value must have at least these property values.
Type `Signature` (represented as `dict` in JSON). """
self.patternString = None
""" Value must have at least these property values.
Type `str`. """
self.patternTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternTiming = None
""" Value must have at least these property values.
Type `Timing` (represented as `dict` in JSON). """
self.patternUnsignedInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternUri = None
""" Value must have at least these property values.
Type `str`. """
self.representation = None
""" xmlAttr | xmlText | typeAttr | cdaText | xhtml.
List of `str` items. """
self.requirements = None
""" Why this resource has been created.
Type `str`. """
self.short = None
""" Concise definition for space-constrained presentation.
Type `str`. """
self.sliceName = None
""" Name for this particular element (in a set of slices).
Type `str`. """
self.slicing = None
""" This element is sliced - slices follow.
Type `ElementDefinitionSlicing` (represented as `dict` in JSON). """
self.type = None
""" Data type and Profile for this element.
List of `ElementDefinitionType` items (represented as `dict` in JSON). """
super(ElementDefinition, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        """ Return the property definitions for ElementDefinition.

        Each entry is a 6-tuple used by the FHIR base class to map JSON to
        attributes: (attribute name, JSON property name, type, is_list,
        "one of many" choice-group name or None, not_optional).
        """
        js = super(ElementDefinition, self).elementProperties()
        js.extend([
            # Plain (non-choice) element metadata.
            ("alias", "alias", str, True, None, False),
            ("base", "base", ElementDefinitionBase, False, None, False),
            ("binding", "binding", ElementDefinitionBinding, False, None, False),
            ("code", "code", coding.Coding, True, None, False),
            ("comment", "comment", str, False, None, False),
            ("condition", "condition", str, True, None, False),
            ("constraint", "constraint", ElementDefinitionConstraint, True, None, False),
            ("contentReference", "contentReference", str, False, None, False),
            # defaultValue[x] choice group: at most one of these is set.
            ("defaultValueAddress", "defaultValueAddress", address.Address, False, "defaultValue", False),
            ("defaultValueAge", "defaultValueAge", age.Age, False, "defaultValue", False),
            ("defaultValueAnnotation", "defaultValueAnnotation", annotation.Annotation, False, "defaultValue", False),
            ("defaultValueAttachment", "defaultValueAttachment", attachment.Attachment, False, "defaultValue", False),
            ("defaultValueBase64Binary", "defaultValueBase64Binary", str, False, "defaultValue", False),
            ("defaultValueBoolean", "defaultValueBoolean", bool, False, "defaultValue", False),
            ("defaultValueCode", "defaultValueCode", str, False, "defaultValue", False),
            ("defaultValueCodeableConcept", "defaultValueCodeableConcept", codeableconcept.CodeableConcept, False, "defaultValue", False),
            ("defaultValueCoding", "defaultValueCoding", coding.Coding, False, "defaultValue", False),
            ("defaultValueContactPoint", "defaultValueContactPoint", contactpoint.ContactPoint, False, "defaultValue", False),
            ("defaultValueCount", "defaultValueCount", count.Count, False, "defaultValue", False),
            ("defaultValueDate", "defaultValueDate", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueDateTime", "defaultValueDateTime", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueDecimal", "defaultValueDecimal", float, False, "defaultValue", False),
            ("defaultValueDistance", "defaultValueDistance", distance.Distance, False, "defaultValue", False),
            ("defaultValueDuration", "defaultValueDuration", duration.Duration, False, "defaultValue", False),
            ("defaultValueHumanName", "defaultValueHumanName", humanname.HumanName, False, "defaultValue", False),
            ("defaultValueId", "defaultValueId", str, False, "defaultValue", False),
            ("defaultValueIdentifier", "defaultValueIdentifier", identifier.Identifier, False, "defaultValue", False),
            ("defaultValueInstant", "defaultValueInstant", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueInteger", "defaultValueInteger", int, False, "defaultValue", False),
            ("defaultValueMarkdown", "defaultValueMarkdown", str, False, "defaultValue", False),
            ("defaultValueMeta", "defaultValueMeta", meta.Meta, False, "defaultValue", False),
            ("defaultValueMoney", "defaultValueMoney", money.Money, False, "defaultValue", False),
            ("defaultValueOid", "defaultValueOid", str, False, "defaultValue", False),
            ("defaultValuePeriod", "defaultValuePeriod", period.Period, False, "defaultValue", False),
            ("defaultValuePositiveInt", "defaultValuePositiveInt", int, False, "defaultValue", False),
            ("defaultValueQuantity", "defaultValueQuantity", quantity.Quantity, False, "defaultValue", False),
            ("defaultValueRange", "defaultValueRange", range.Range, False, "defaultValue", False),
            ("defaultValueRatio", "defaultValueRatio", ratio.Ratio, False, "defaultValue", False),
            ("defaultValueReference", "defaultValueReference", fhirreference.FHIRReference, False, "defaultValue", False),
            ("defaultValueSampledData", "defaultValueSampledData", sampleddata.SampledData, False, "defaultValue", False),
            ("defaultValueSignature", "defaultValueSignature", signature.Signature, False, "defaultValue", False),
            ("defaultValueString", "defaultValueString", str, False, "defaultValue", False),
            ("defaultValueTime", "defaultValueTime", fhirdate.FHIRDate, False, "defaultValue", False),
            ("defaultValueTiming", "defaultValueTiming", timing.Timing, False, "defaultValue", False),
            ("defaultValueUnsignedInt", "defaultValueUnsignedInt", int, False, "defaultValue", False),
            ("defaultValueUri", "defaultValueUri", str, False, "defaultValue", False),
            ("definition", "definition", str, False, None, False),
            ("example", "example", ElementDefinitionExample, True, None, False),
            # fixed[x] choice group: at most one of these is set.
            ("fixedAddress", "fixedAddress", address.Address, False, "fixed", False),
            ("fixedAge", "fixedAge", age.Age, False, "fixed", False),
            ("fixedAnnotation", "fixedAnnotation", annotation.Annotation, False, "fixed", False),
            ("fixedAttachment", "fixedAttachment", attachment.Attachment, False, "fixed", False),
            ("fixedBase64Binary", "fixedBase64Binary", str, False, "fixed", False),
            ("fixedBoolean", "fixedBoolean", bool, False, "fixed", False),
            ("fixedCode", "fixedCode", str, False, "fixed", False),
            ("fixedCodeableConcept", "fixedCodeableConcept", codeableconcept.CodeableConcept, False, "fixed", False),
            ("fixedCoding", "fixedCoding", coding.Coding, False, "fixed", False),
            ("fixedContactPoint", "fixedContactPoint", contactpoint.ContactPoint, False, "fixed", False),
            ("fixedCount", "fixedCount", count.Count, False, "fixed", False),
            ("fixedDate", "fixedDate", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedDateTime", "fixedDateTime", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedDecimal", "fixedDecimal", float, False, "fixed", False),
            ("fixedDistance", "fixedDistance", distance.Distance, False, "fixed", False),
            ("fixedDuration", "fixedDuration", duration.Duration, False, "fixed", False),
            ("fixedHumanName", "fixedHumanName", humanname.HumanName, False, "fixed", False),
            ("fixedId", "fixedId", str, False, "fixed", False),
            ("fixedIdentifier", "fixedIdentifier", identifier.Identifier, False, "fixed", False),
            ("fixedInstant", "fixedInstant", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedInteger", "fixedInteger", int, False, "fixed", False),
            ("fixedMarkdown", "fixedMarkdown", str, False, "fixed", False),
            ("fixedMeta", "fixedMeta", meta.Meta, False, "fixed", False),
            ("fixedMoney", "fixedMoney", money.Money, False, "fixed", False),
            ("fixedOid", "fixedOid", str, False, "fixed", False),
            ("fixedPeriod", "fixedPeriod", period.Period, False, "fixed", False),
            ("fixedPositiveInt", "fixedPositiveInt", int, False, "fixed", False),
            ("fixedQuantity", "fixedQuantity", quantity.Quantity, False, "fixed", False),
            ("fixedRange", "fixedRange", range.Range, False, "fixed", False),
            ("fixedRatio", "fixedRatio", ratio.Ratio, False, "fixed", False),
            ("fixedReference", "fixedReference", fhirreference.FHIRReference, False, "fixed", False),
            ("fixedSampledData", "fixedSampledData", sampleddata.SampledData, False, "fixed", False),
            ("fixedSignature", "fixedSignature", signature.Signature, False, "fixed", False),
            ("fixedString", "fixedString", str, False, "fixed", False),
            ("fixedTime", "fixedTime", fhirdate.FHIRDate, False, "fixed", False),
            ("fixedTiming", "fixedTiming", timing.Timing, False, "fixed", False),
            ("fixedUnsignedInt", "fixedUnsignedInt", int, False, "fixed", False),
            ("fixedUri", "fixedUri", str, False, "fixed", False),
            ("isModifier", "isModifier", bool, False, None, False),
            ("isSummary", "isSummary", bool, False, None, False),
            ("label", "label", str, False, None, False),
            ("mapping", "mapping", ElementDefinitionMapping, True, None, False),
            ("max", "max", str, False, None, False),
            ("maxLength", "maxLength", int, False, None, False),
            # maxValue[x] choice group.
            ("maxValueDate", "maxValueDate", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueDateTime", "maxValueDateTime", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueDecimal", "maxValueDecimal", float, False, "maxValue", False),
            ("maxValueInstant", "maxValueInstant", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueInteger", "maxValueInteger", int, False, "maxValue", False),
            ("maxValuePositiveInt", "maxValuePositiveInt", int, False, "maxValue", False),
            ("maxValueQuantity", "maxValueQuantity", quantity.Quantity, False, "maxValue", False),
            ("maxValueTime", "maxValueTime", fhirdate.FHIRDate, False, "maxValue", False),
            ("maxValueUnsignedInt", "maxValueUnsignedInt", int, False, "maxValue", False),
            ("meaningWhenMissing", "meaningWhenMissing", str, False, None, False),
            ("min", "min", int, False, None, False),
            # minValue[x] choice group.
            ("minValueDate", "minValueDate", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueDateTime", "minValueDateTime", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueDecimal", "minValueDecimal", float, False, "minValue", False),
            ("minValueInstant", "minValueInstant", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueInteger", "minValueInteger", int, False, "minValue", False),
            ("minValuePositiveInt", "minValuePositiveInt", int, False, "minValue", False),
            ("minValueQuantity", "minValueQuantity", quantity.Quantity, False, "minValue", False),
            ("minValueTime", "minValueTime", fhirdate.FHIRDate, False, "minValue", False),
            ("minValueUnsignedInt", "minValueUnsignedInt", int, False, "minValue", False),
            ("mustSupport", "mustSupport", bool, False, None, False),
            ("orderMeaning", "orderMeaning", str, False, None, False),
            # "path" is the only required (not_optional=True) property here.
            ("path", "path", str, False, None, True),
            # pattern[x] choice group: at most one of these is set.
            ("patternAddress", "patternAddress", address.Address, False, "pattern", False),
            ("patternAge", "patternAge", age.Age, False, "pattern", False),
            ("patternAnnotation", "patternAnnotation", annotation.Annotation, False, "pattern", False),
            ("patternAttachment", "patternAttachment", attachment.Attachment, False, "pattern", False),
            ("patternBase64Binary", "patternBase64Binary", str, False, "pattern", False),
            ("patternBoolean", "patternBoolean", bool, False, "pattern", False),
            ("patternCode", "patternCode", str, False, "pattern", False),
            ("patternCodeableConcept", "patternCodeableConcept", codeableconcept.CodeableConcept, False, "pattern", False),
            ("patternCoding", "patternCoding", coding.Coding, False, "pattern", False),
            ("patternContactPoint", "patternContactPoint", contactpoint.ContactPoint, False, "pattern", False),
            ("patternCount", "patternCount", count.Count, False, "pattern", False),
            ("patternDate", "patternDate", fhirdate.FHIRDate, False, "pattern", False),
            ("patternDateTime", "patternDateTime", fhirdate.FHIRDate, False, "pattern", False),
            ("patternDecimal", "patternDecimal", float, False, "pattern", False),
            ("patternDistance", "patternDistance", distance.Distance, False, "pattern", False),
            ("patternDuration", "patternDuration", duration.Duration, False, "pattern", False),
            ("patternHumanName", "patternHumanName", humanname.HumanName, False, "pattern", False),
            ("patternId", "patternId", str, False, "pattern", False),
            ("patternIdentifier", "patternIdentifier", identifier.Identifier, False, "pattern", False),
            ("patternInstant", "patternInstant", fhirdate.FHIRDate, False, "pattern", False),
            ("patternInteger", "patternInteger", int, False, "pattern", False),
            ("patternMarkdown", "patternMarkdown", str, False, "pattern", False),
            ("patternMeta", "patternMeta", meta.Meta, False, "pattern", False),
            ("patternMoney", "patternMoney", money.Money, False, "pattern", False),
            ("patternOid", "patternOid", str, False, "pattern", False),
            ("patternPeriod", "patternPeriod", period.Period, False, "pattern", False),
            ("patternPositiveInt", "patternPositiveInt", int, False, "pattern", False),
            ("patternQuantity", "patternQuantity", quantity.Quantity, False, "pattern", False),
            ("patternRange", "patternRange", range.Range, False, "pattern", False),
            ("patternRatio", "patternRatio", ratio.Ratio, False, "pattern", False),
            ("patternReference", "patternReference", fhirreference.FHIRReference, False, "pattern", False),
            ("patternSampledData", "patternSampledData", sampleddata.SampledData, False, "pattern", False),
            ("patternSignature", "patternSignature", signature.Signature, False, "pattern", False),
            ("patternString", "patternString", str, False, "pattern", False),
            ("patternTime", "patternTime", fhirdate.FHIRDate, False, "pattern", False),
            ("patternTiming", "patternTiming", timing.Timing, False, "pattern", False),
            ("patternUnsignedInt", "patternUnsignedInt", int, False, "pattern", False),
            ("patternUri", "patternUri", str, False, "pattern", False),
            ("representation", "representation", str, True, None, False),
            ("requirements", "requirements", str, False, None, False),
            ("short", "short", str, False, None, False),
            ("sliceName", "sliceName", str, False, None, False),
            ("slicing", "slicing", ElementDefinitionSlicing, False, None, False),
            ("type", "type", ElementDefinitionType, True, None, False),
        ])
        return js
class ElementDefinitionBase(element.Element):
    """ Base definition information for tools.

    Information about the base definition of the element, provided to make it
    unnecessary for tools to trace the derivation of the element through the
    derived and related profiles. This information is provided when the
    element definition is not the original definition of an element - e.g.
    either in a constraint on another type, or for elements from a super type
    in a snapshot.
    """

    resource_type = "ElementDefinitionBase"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.max = None   # Max cardinality of the base element (str).
        self.min = None   # Min cardinality of the base element (int).
        self.path = None  # Path that identifies the base element (str).

        super(ElementDefinitionBase, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional)
        properties = super(ElementDefinitionBase, self).elementProperties()
        properties.extend([
            ("max", "max", str, False, None, True),
            ("min", "min", int, False, None, True),
            ("path", "path", str, False, None, True),
        ])
        return properties
class ElementDefinitionBinding(element.Element):
    """ ValueSet details if this is coded.

    Binds to a value set if this element is coded (code, Coding,
    CodeableConcept, Quantity), or the data types (string, uri).
    """

    resource_type = "ElementDefinitionBinding"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.description = None       # Human explanation of the value set (str).
        self.strength = None          # required | extensible | preferred | example (str).
        # valueSet[x] choice: either a FHIRReference to a ValueSet or a URI.
        self.valueSetReference = None
        self.valueSetUri = None

        super(ElementDefinitionBinding, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional)
        return super(ElementDefinitionBinding, self).elementProperties() + [
            ("description", "description", str, False, None, False),
            ("strength", "strength", str, False, None, True),
            ("valueSetReference", "valueSetReference", fhirreference.FHIRReference, False, "valueSet", False),
            ("valueSetUri", "valueSetUri", str, False, "valueSet", False),
        ]
class ElementDefinitionConstraint(element.Element):
    """ Condition that must evaluate to true.

    Formal constraints such as co-occurrence and other constraints that can be
    computationally evaluated within the context of the instance.
    """

    resource_type = "ElementDefinitionConstraint"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.expression = None    # FHIRPath expression of constraint (str).
        self.human = None         # Human description of constraint (str).
        self.key = None           # Target of 'condition' reference above (str).
        self.requirements = None  # Why this constraint is necessary or appropriate (str).
        self.severity = None      # error | warning (str).
        self.source = None        # Reference to original source of constraint (str).
        self.xpath = None         # XPath expression of constraint (str).

        super(ElementDefinitionConstraint, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional)
        return super(ElementDefinitionConstraint, self).elementProperties() + [
            ("expression", "expression", str, False, None, True),
            ("human", "human", str, False, None, True),
            ("key", "key", str, False, None, True),
            ("requirements", "requirements", str, False, None, False),
            ("severity", "severity", str, False, None, True),
            ("source", "source", str, False, None, False),
            ("xpath", "xpath", str, False, None, False),
        ]
class ElementDefinitionExample(element.Element):
    """ Example value (as defined for type).

    A sample value for this element demonstrating the type of information that
    would typically be found in the element. The example value itself lives in
    exactly one of the ``value[x]`` attributes below.
    """

    resource_type = "ElementDefinitionExample"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Describes the purpose of this example (str).
        self.label = None

        # value[x] choice group: at most one of these carries the example
        # value. The comment notes the Python type (complex FHIR types are
        # represented as `dict` in JSON, FHIRDate as `str`).
        self.valueAddress = None          # Address
        self.valueAge = None              # Age
        self.valueAnnotation = None       # Annotation
        self.valueAttachment = None       # Attachment
        self.valueBase64Binary = None     # str
        self.valueBoolean = None          # bool
        self.valueCode = None             # str
        self.valueCodeableConcept = None  # CodeableConcept
        self.valueCoding = None           # Coding
        self.valueContactPoint = None     # ContactPoint
        self.valueCount = None            # Count
        self.valueDate = None             # FHIRDate
        self.valueDateTime = None         # FHIRDate
        self.valueDecimal = None          # float
        self.valueDistance = None         # Distance
        self.valueDuration = None         # Duration
        self.valueHumanName = None        # HumanName
        self.valueId = None               # str
        self.valueIdentifier = None       # Identifier
        self.valueInstant = None          # FHIRDate
        self.valueInteger = None          # int
        self.valueMarkdown = None         # str
        self.valueMeta = None             # Meta
        self.valueMoney = None            # Money
        self.valueOid = None              # str
        self.valuePeriod = None           # Period
        self.valuePositiveInt = None      # int
        self.valueQuantity = None         # Quantity
        self.valueRange = None            # Range
        self.valueRatio = None            # Ratio
        self.valueReference = None        # FHIRReference
        self.valueSampledData = None      # SampledData
        self.valueSignature = None        # Signature
        self.valueString = None           # str
        self.valueTime = None             # FHIRDate
        self.valueTiming = None           # Timing
        self.valueUnsignedInt = None      # int
        self.valueUri = None              # str

        super(ElementDefinitionExample, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional);
        # every value[x] entry is marked not_optional within the "value" group.
        properties = super(ElementDefinitionExample, self).elementProperties()
        properties.extend([
            ("label", "label", str, False, None, True),
            ("valueAddress", "valueAddress", address.Address, False, "value", True),
            ("valueAge", "valueAge", age.Age, False, "value", True),
            ("valueAnnotation", "valueAnnotation", annotation.Annotation, False, "value", True),
            ("valueAttachment", "valueAttachment", attachment.Attachment, False, "value", True),
            ("valueBase64Binary", "valueBase64Binary", str, False, "value", True),
            ("valueBoolean", "valueBoolean", bool, False, "value", True),
            ("valueCode", "valueCode", str, False, "value", True),
            ("valueCodeableConcept", "valueCodeableConcept", codeableconcept.CodeableConcept, False, "value", True),
            ("valueCoding", "valueCoding", coding.Coding, False, "value", True),
            ("valueContactPoint", "valueContactPoint", contactpoint.ContactPoint, False, "value", True),
            ("valueCount", "valueCount", count.Count, False, "value", True),
            ("valueDate", "valueDate", fhirdate.FHIRDate, False, "value", True),
            ("valueDateTime", "valueDateTime", fhirdate.FHIRDate, False, "value", True),
            ("valueDecimal", "valueDecimal", float, False, "value", True),
            ("valueDistance", "valueDistance", distance.Distance, False, "value", True),
            ("valueDuration", "valueDuration", duration.Duration, False, "value", True),
            ("valueHumanName", "valueHumanName", humanname.HumanName, False, "value", True),
            ("valueId", "valueId", str, False, "value", True),
            ("valueIdentifier", "valueIdentifier", identifier.Identifier, False, "value", True),
            ("valueInstant", "valueInstant", fhirdate.FHIRDate, False, "value", True),
            ("valueInteger", "valueInteger", int, False, "value", True),
            ("valueMarkdown", "valueMarkdown", str, False, "value", True),
            ("valueMeta", "valueMeta", meta.Meta, False, "value", True),
            ("valueMoney", "valueMoney", money.Money, False, "value", True),
            ("valueOid", "valueOid", str, False, "value", True),
            ("valuePeriod", "valuePeriod", period.Period, False, "value", True),
            ("valuePositiveInt", "valuePositiveInt", int, False, "value", True),
            ("valueQuantity", "valueQuantity", quantity.Quantity, False, "value", True),
            ("valueRange", "valueRange", range.Range, False, "value", True),
            ("valueRatio", "valueRatio", ratio.Ratio, False, "value", True),
            ("valueReference", "valueReference", fhirreference.FHIRReference, False, "value", True),
            ("valueSampledData", "valueSampledData", sampleddata.SampledData, False, "value", True),
            ("valueSignature", "valueSignature", signature.Signature, False, "value", True),
            ("valueString", "valueString", str, False, "value", True),
            ("valueTime", "valueTime", fhirdate.FHIRDate, False, "value", True),
            ("valueTiming", "valueTiming", timing.Timing, False, "value", True),
            ("valueUnsignedInt", "valueUnsignedInt", int, False, "value", True),
            ("valueUri", "valueUri", str, False, "value", True),
        ])
        return properties
class ElementDefinitionMapping(element.Element):
    """ Map element to another set of definitions.

    Identifies a concept from an external specification that roughly
    corresponds to this element.
    """

    resource_type = "ElementDefinitionMapping"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.comment = None   # Comments about the mapping or its use (str).
        self.identity = None  # Reference to mapping declaration (str).
        self.language = None  # Computable language of mapping (str).
        self.map = None       # Details of the mapping (str).

        super(ElementDefinitionMapping, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional)
        properties = super(ElementDefinitionMapping, self).elementProperties()
        properties.extend([
            ("comment", "comment", str, False, None, False),
            ("identity", "identity", str, False, None, True),
            ("language", "language", str, False, None, False),
            ("map", "map", str, False, None, True),
        ])
        return properties
class ElementDefinitionSlicing(element.Element):
    """ This element is sliced - slices follow.

    Indicates that the element is sliced into a set of alternative definitions
    (i.e. in a structure definition, there are multiple different constraints
    on a single element in the base resource). Slicing can be used in any
    resource that has cardinality ..* on the base resource, or any resource
    with a choice of types. The set of slices is any elements that come after
    this in the element sequence that have the same path, until a shorter path
    occurs (the shorter path terminates the set).
    """

    resource_type = "ElementDefinitionSlicing"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.description = None    # Text description of how slicing works, or not (str).
        self.discriminator = None  # List of ElementDefinitionSlicingDiscriminator items.
        self.ordered = None        # Whether elements must be in same order as slices (bool).
        self.rules = None          # closed | open | openAtEnd (str).

        super(ElementDefinitionSlicing, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional)
        return super(ElementDefinitionSlicing, self).elementProperties() + [
            ("description", "description", str, False, None, False),
            ("discriminator", "discriminator", ElementDefinitionSlicingDiscriminator, True, None, False),
            ("ordered", "ordered", bool, False, None, False),
            ("rules", "rules", str, False, None, True),
        ]
class ElementDefinitionSlicingDiscriminator(element.Element):
    """ Element values that are used to distinguish the slices.

    Designates which child elements are used to discriminate between the
    slices when processing an instance. If one or more discriminators are
    provided, the value of the child elements in the instance data SHALL
    completely distinguish which slice the element in the resource matches
    based on the allowed values for those elements in each of the slices.
    """

    resource_type = "ElementDefinitionSlicingDiscriminator"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.path = None  # Path to element value (str).
        self.type = None  # value | exists | pattern | type | profile (str).

        super(ElementDefinitionSlicingDiscriminator, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional)
        return super(ElementDefinitionSlicingDiscriminator, self).elementProperties() + [
            ("path", "path", str, False, None, True),
            ("type", "type", str, False, None, True),
        ]
class ElementDefinitionType(element.Element):
    """ Data type and Profile for this element.

    The data type or resource that the value of this element is permitted to
    be.
    """

    resource_type = "ElementDefinitionType"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.aggregation = None    # contained | referenced | bundled - how aggregated (list of str).
        self.code = None           # Data type or Resource (reference to definition) (str).
        self.profile = None        # Profile (StructureDefinition) to apply, or IG (str).
        self.targetProfile = None  # Profile to apply to reference target, or IG (str).
        self.versioning = None     # either | independent | specific (str).

        super(ElementDefinitionType, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # (name, json_name, type, is_list, of_many_group, not_optional)
        properties = super(ElementDefinitionType, self).elementProperties()
        properties.extend([
            ("aggregation", "aggregation", str, True, None, False),
            ("code", "code", str, False, None, True),
            ("profile", "profile", str, False, None, False),
            ("targetProfile", "targetProfile", str, False, None, False),
            ("versioning", "versioning", str, False, None, False),
        ])
        return properties
import sys
try:
from . import address
except ImportError:
address = sys.modules[__package__ + '.address']
try:
from . import age
except ImportError:
age = sys.modules[__package__ + '.age']
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import count
except ImportError:
count = sys.modules[__package__ + '.count']
try:
from . import distance
except ImportError:
distance = sys.modules[__package__ + '.distance']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import humanname
except ImportError:
humanname = sys.modules[__package__ + '.humanname']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import meta
except ImportError:
meta = sys.modules[__package__ + '.meta']
try:
from . import money
except ImportError:
money = sys.modules[__package__ + '.money']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + '.ratio']
try:
from . import sampleddata
except ImportError:
sampleddata = sys.modules[__package__ + '.sampleddata']
try:
from . import signature
except ImportError:
signature = sys.modules[__package__ + '.signature']
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + '.timing']
| bsd-3-clause | 21d5494399f98486d7910baa767941cb | 41.267701 | 138 | 0.590479 | 4.571731 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/resource.py | 1 | 1814 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/Resource) on 2017-03-22.
# 2017, SMART Health IT.
from . import fhirabstractresource
class Resource(fhirabstractresource.FHIRAbstractResource):
    """ Base Resource.

    This is the base resource type for everything.
    """

    resource_type = "Resource"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Logical id of this artifact (`str`).
        self.id = None
        # A set of rules under which this content was created (`str`).
        self.implicitRules = None
        # Language of the resource content (`str`).
        self.language = None
        # Metadata about the resource (`Meta`, represented as `dict` in JSON).
        self.meta = None

        super(Resource, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples used for JSON (de)serialization."""
        properties = super(Resource, self).elementProperties()
        properties += [
            ("id", "id", str, False, None, False),
            ("implicitRules", "implicitRules", str, False, None, False),
            ("language", "language", str, False, None, False),
            ("meta", "meta", meta.Meta, False, None, False),
        ]
        return properties
import sys
try:
from . import meta
except ImportError:
meta = sys.modules[__package__ + '.meta']
| bsd-3-clause | 7f7bd4aa45e50f565d81ceda6c970df0 | 29.745763 | 100 | 0.585447 | 4.199074 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/detectedissue_tests.py | 1 | 4545 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import detectedissue
from .fhirdate import FHIRDate
class DetectedIssueTests(unittest.TestCase):
    """Round-trip tests for the DetectedIssue model, driven by FHIR example JSON files."""

    def instantiate_from(self, filename):
        """Load a JSON example file from FHIR_UNITTEST_DATADIR and build a DetectedIssue."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
        self.assertEqual("DetectedIssue", js["resourceType"])
        return detectedissue.DetectedIssue(js)

    def testDetectedIssue1(self):
        """Parse detectedissue-example-allergy.json, then re-serialize and re-parse it."""
        inst = self.instantiate_from("detectedissue-example-allergy.json")
        self.assertIsNotNone(inst, "Must have instantiated a DetectedIssue instance")
        self.implDetectedIssue1(inst)
        js = inst.as_json()
        self.assertEqual("DetectedIssue", js["resourceType"])
        inst2 = detectedissue.DetectedIssue(js)
        self.implDetectedIssue1(inst2)

    def implDetectedIssue1(self, inst):
        """Field-level assertions shared by both parse passes of testDetectedIssue1."""
        self.assertEqual(inst.id, "allergy")
        self.assertEqual(inst.text.div, "<div>[Put rendering here]</div>")
        self.assertEqual(inst.text.status, "generated")

    def testDetectedIssue2(self):
        """Parse detectedissue-example-dup.json, then re-serialize and re-parse it."""
        inst = self.instantiate_from("detectedissue-example-dup.json")
        self.assertIsNotNone(inst, "Must have instantiated a DetectedIssue instance")
        self.implDetectedIssue2(inst)
        js = inst.as_json()
        self.assertEqual("DetectedIssue", js["resourceType"])
        inst2 = detectedissue.DetectedIssue(js)
        self.implDetectedIssue2(inst2)

    def implDetectedIssue2(self, inst):
        """Field-level assertions shared by both parse passes of testDetectedIssue2."""
        self.assertEqual(inst.category.coding[0].code, "DUPTHPY")
        self.assertEqual(inst.category.coding[0].display, "Duplicate Therapy Alert")
        self.assertEqual(inst.category.coding[0].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.date.date, FHIRDate("2013-05-08").date)
        self.assertEqual(inst.date.as_json(), "2013-05-08")
        self.assertEqual(inst.detail, "Similar test was performed within the past 14 days")
        self.assertEqual(inst.id, "duplicate")
        self.assertEqual(inst.text.status, "generated")

    def testDetectedIssue3(self):
        """Parse detectedissue-example-lab.json, then re-serialize and re-parse it."""
        inst = self.instantiate_from("detectedissue-example-lab.json")
        self.assertIsNotNone(inst, "Must have instantiated a DetectedIssue instance")
        self.implDetectedIssue3(inst)
        js = inst.as_json()
        self.assertEqual("DetectedIssue", js["resourceType"])
        inst2 = detectedissue.DetectedIssue(js)
        self.implDetectedIssue3(inst2)

    def implDetectedIssue3(self, inst):
        """Field-level assertions shared by both parse passes of testDetectedIssue3."""
        self.assertEqual(inst.id, "lab")
        self.assertEqual(inst.text.div, "<div>[Put rendering here]</div>")
        self.assertEqual(inst.text.status, "generated")

    def testDetectedIssue4(self):
        """Parse detectedissue-example.json, then re-serialize and re-parse it."""
        inst = self.instantiate_from("detectedissue-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a DetectedIssue instance")
        self.implDetectedIssue4(inst)
        js = inst.as_json()
        self.assertEqual("DetectedIssue", js["resourceType"])
        inst2 = detectedissue.DetectedIssue(js)
        self.implDetectedIssue4(inst2)

    def implDetectedIssue4(self, inst):
        """Field-level assertions shared by both parse passes of testDetectedIssue4."""
        self.assertEqual(inst.category.coding[0].code, "DRG")
        self.assertEqual(inst.category.coding[0].display, "Drug Interaction Alert")
        self.assertEqual(inst.category.coding[0].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.date.date, FHIRDate("2014-01-05").date)
        self.assertEqual(inst.date.as_json(), "2014-01-05")
        self.assertEqual(inst.id, "ddi")
        self.assertEqual(inst.mitigation[0].action.coding[0].code, "13")
        self.assertEqual(inst.mitigation[0].action.coding[0].display, "Stopped Concurrent Therapy")
        self.assertEqual(inst.mitigation[0].action.coding[0].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.mitigation[0].action.text, "Asked patient to discontinue regular use of Tylenol and to consult with clinician if they need to resume to allow appropriate INR monitoring")
        self.assertEqual(inst.mitigation[0].date.date, FHIRDate("2014-01-05").date)
        self.assertEqual(inst.mitigation[0].date.as_json(), "2014-01-05")
        self.assertEqual(inst.severity, "high")
        self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | cce3fb8c462356391624527f904c6cd8 | 44.45 | 200 | 0.676128 | 3.456274 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/tools/tool_libs/rdr_docs.py | 1 | 5500 | #! /bin/env python
#
# Tool to manage the RDR readthedocs project via its public API.
#
import argparse
import logging
import sys
from json import dumps
from time import sleep
from rdr_service.services.system_utils import setup_logging, setup_i18n
from rdr_service.services.documentation_utils import ReadTheDocsHandler
_logger = logging.getLogger("rdr_logger")
# Tool_cmd and tool_desc name are required.
# Remember to add/update bash completion in 'tool_lib/tools.bash'
tool_cmd = "rdr-docs"
tool_desc = "manage RDR documentation content on readthedocs.org"
class RTDBaseClass(object):
    """Base class for rdr-docs sub-command handlers; holds parsed args and the API client."""

    def __init__(self, args):
        """
        :param args: command line arguments.
        """
        self.args = args
        # Client for the readthedocs.org API, shared by all sub-command handlers.
        self._rtd_handler = ReadTheDocsHandler()
class RTDBuildClass(RTDBaseClass):
    """Handler for the `build` sub-command: trigger a docs build and optionally wait for it."""

    def run(self):
        """Trigger a readthedocs.org build for the configured version slug.

        Unless --no-wait was given, polls the build every 10 seconds until the
        API reports the 'finished' state, then logs success or failure (with the
        full build JSON on failure).

        :return: process exit code; always 0 — build failure is only logged.
        """
        build_id = self._rtd_handler.build_the_docs(self.args.slug)
        _logger.info(f'Documentation build {build_id} started for version {self.args.slug}')
        if not self.args.no_wait:
            state = ""
            json_data = None
            while state.lower() != 'finished':
                sleep(10)
                json_data = self._rtd_handler.get_build_details(build_id)
                # Only log when the reported state changes, so the poll loop stays quiet.
                if state != json_data['state']['name']:
                    state = json_data['state']['name']
                    _logger.info(f'{state}')
            success = json_data['success']
            if success:
                _logger.info(f'"{self.args.slug}" build {build_id} succeeded')
            else:
                _logger.error(f'"{self.args.slug}" build {build_id} FAILED.')
                _logger.error(dumps(json_data, indent=4))
        return 0
class RTDListClass(RTDBaseClass):
    """Handler for the `list` sub-command: print details fetched from readthedocs.org."""

    def run(self):
        """Log the requested build/version/default-tag details.

        :return: process exit code; always 0.
        """
        handler = self._rtd_handler
        if self.args.build:
            details = handler.get_build_details(self.args.build)
            _logger.info(dumps(details, indent=4))
        if self.args.version:
            details = handler.get_version_details(self.args.version)
            _logger.info(dumps(details, indent=4))
        if self.args.default_tag:
            default = handler.get_project_details()['default_branch']
            _logger.info(f'RDR docs latest version using RDR git tag {default}')
        return 0
class RTDUpdateClass(RTDBaseClass):
    """Handler for the `update` sub-command: change RDR project settings on readthedocs.org."""

    def run(self):
        """Point the 'latest' docs build at a new git tag, when one was supplied.

        :return: process exit code; always 0.
        """
        tag = self.args.latest
        if tag:
            self._rtd_handler.update_project_to_release(tag)
            _logger.info(f'RDR docs will now use git tag {tag} when building latest version')
        return 0
def run():
    """Entry point for the rdr-docs tool.

    Sets up logging, builds the argument parser with its `build`/`list`/`update`
    sub-commands, and dispatches to the handler class registered for the chosen
    sub-command.

    :return: exit code from the selected handler, or 1 when no sub-command was given.
    """
    # Set global debug value and setup application logging.
    setup_logging(
        _logger, tool_cmd, "--debug" in sys.argv, "{0}.log".format(tool_cmd) if "--log-file" in sys.argv else None
    )
    setup_i18n()

    # Setup program arguments.
    parser = argparse.ArgumentParser(prog=tool_cmd, description=tool_desc)
    parser.add_argument("--debug", help="enable debug output", default=False, action="store_true")  # noqa
    parser.add_argument("--log-file", help="write output to a log file", default=False, action="store_true")  # noqa
    parser.add_argument("--project", help="gcp project name", default="localhost")  # noqa

    help_txt = "RDR documentation services. NOTE: requires RTD_API_TOKEN environment variable or "
    help_txt += "readthedocs_creds entry in current_config. See prod or stable config for details"
    subparser = parser.add_subparsers(help=help_txt)

    # Trigger documentation build
    build_parser = subparser.add_parser("build", help="Trigger a documentation build on readthedocs.org")
    build_parser.add_argument("--slug", help="version slug (e.g., 'stable', 'latest') to build. Default is 'stable'",
                              default="stable", type=str, required=True)
    build_parser.add_argument("--no-wait", help="Do not wait for build to complete",
                              default=False, action="store_true")
    build_parser.set_defaults(handler=RTDBuildClass)

    # Get item details from ReadTheDocs
    list_parser = subparser.add_parser("list", help="Retrieve details on an item from readthedocs.org")
    list_parser.add_argument("--build", help="Show build details for the specified build id", type=int)
    list_parser.add_argument("--version",
                             help="Show version details for the specified version slug (e.g., 'stable' or 'latest')",
                             type=str)
    list_parser.add_argument("--default-tag",
                             help="Show the current default_branch/tag for RDR 'latest' version", action="store_true")
    list_parser.set_defaults(handler=RTDListClass)

    # Update project settings in ReadTheDocs
    update_parser = subparser.add_parser("update", help="Update the RDR readthedocs.org project settings")
    update_parser.add_argument("--latest",
                               help="release git tag (X.Y.Z) to set as latest default_branch in readthedocs.org",
                               default=None, type=str)
    update_parser.set_defaults(handler=RTDUpdateClass)

    args = parser.parse_args()

    # Dispatch on the handler class registered by the chosen sub-parser. The previous
    # hasattr(args, 'no_wait') / hasattr(args, 'latest') probing silently misroutes as
    # soon as option names change or overlap between sub-commands.
    handler_class = getattr(args, "handler", None)
    if handler_class is None:
        _logger.info('Please select a service option to run. For help, use "rdr-docs --help"')
        return 1
    return handler_class(args).run()
# --- Main Program Call ---
if __name__ == "__main__":
sys.exit(run())
| bsd-3-clause | 4311fd73b83c07207e0d7f0dbb2234d2 | 40.353383 | 118 | 0.628182 | 3.835425 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/1ebd26a7612c_add_origin_to_biobank_order.py | 1 | 1284 | """add_origin_to_biobank_order
Revision ID: 1ebd26a7612c
Revises: c2fa79ef2fca
Create Date: 2020-02-26 12:00:50.340756
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1ebd26a7612c'        # this migration
down_revision = 'c2fa79ef2fca'   # parent revision in the migration chain
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, e.g. upgrade_rdr()."""
    globals()[f"upgrade_{engine_name}"]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. downgrade_rdr()."""
    globals()[f"downgrade_{engine_name}"]()
def upgrade_rdr():
    """Add the nullable order_origin column to biobank_order and biobank_history."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('biobank_history', sa.Column('order_origin', sa.String(length=80), nullable=True))
    op.add_column('biobank_order', sa.Column('order_origin', sa.String(length=80), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the order_origin column from biobank_order and biobank_history."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('biobank_order', 'order_origin')
    op.drop_column('biobank_history', 'order_origin')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes are needed for this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes to revert for this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | 290b3ca0f08ad05bd12aaf889ca1806b | 24.176471 | 100 | 0.668224 | 3.34375 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/messageheader_tests.py | 1 | 2390 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import messageheader
from .fhirdate import FHIRDate
class MessageHeaderTests(unittest.TestCase):
    """Round-trip tests for the MessageHeader model, driven by FHIR example JSON files."""

    def instantiate_from(self, filename):
        """Load a JSON example file from FHIR_UNITTEST_DATADIR and build a MessageHeader."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
        self.assertEqual("MessageHeader", js["resourceType"])
        return messageheader.MessageHeader(js)

    def testMessageHeader1(self):
        """Parse messageheader-example.json, then re-serialize and re-parse it."""
        inst = self.instantiate_from("messageheader-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a MessageHeader instance")
        self.implMessageHeader1(inst)
        js = inst.as_json()
        self.assertEqual("MessageHeader", js["resourceType"])
        inst2 = messageheader.MessageHeader(js)
        self.implMessageHeader1(inst2)

    def implMessageHeader1(self, inst):
        """Field-level assertions shared by both parse passes of testMessageHeader1."""
        self.assertEqual(inst.destination[0].endpoint, "llp:10.11.12.14:5432")
        self.assertEqual(inst.destination[0].name, "Acme Message Gateway")
        self.assertEqual(inst.event.code, "admin-update")
        self.assertEqual(inst.event.system, "http://hl7.org/fhir/message-type")
        self.assertEqual(inst.id, "1cbdfb97-5859-48a4-8301-d54eab818d68")
        self.assertEqual(inst.reason.coding[0].code, "admit")
        self.assertEqual(inst.reason.coding[0].system, "http://hl7.org/fhir/message-reasons-encounter")
        self.assertEqual(inst.response.code, "ok")
        self.assertEqual(inst.response.identifier, "5015fe84-8e76-4526-89d8-44b322e8d4fb")
        self.assertEqual(inst.source.contact.system, "phone")
        self.assertEqual(inst.source.contact.value, "+1 (555) 123 4567")
        self.assertEqual(inst.source.endpoint, "llp:10.11.12.13:5432")
        self.assertEqual(inst.source.name, "Acme Central Patient Registry")
        self.assertEqual(inst.source.software, "FooBar Patient Manager")
        self.assertEqual(inst.source.version, "3.1.45.AABB")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.timestamp.date, FHIRDate("2012-01-04T09:10:14Z").date)
        self.assertEqual(inst.timestamp.as_json(), "2012-01-04T09:10:14Z")
| bsd-3-clause | ff15c1c8169be4ea5862e5eb02a5d9ef | 43.259259 | 103 | 0.6841 | 3.361463 | false | true | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/namingsystem.py | 1 | 6219 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/NamingSystem) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class NamingSystem(domainresource.DomainResource):
    """ System of unique identification.

    A curated namespace that issues unique symbols within that namespace for
    the identification of concepts, people, devices, etc. Represents a
    "System" used within the Identifier and Coding data types.
    """

    resource_type = "NamingSystem"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.contact = None
        """ Contact details for the publisher.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.date = None
        """ Date last changed.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.description = None
        """ Natural language description of the naming system.
        Type `str`. """

        self.jurisdiction = None
        """ Intended jurisdiction for naming system (if applicable).
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.kind = None
        """ codesystem | identifier | root.
        Type `str`. """

        self.name = None
        """ Name for this naming system (computer friendly).
        Type `str`. """

        self.publisher = None
        """ Name of the publisher (organization or individual).
        Type `str`. """

        self.responsible = None
        """ Who maintains system namespace?.
        Type `str`. """

        self.status = None
        """ draft | active | retired | unknown.
        Type `str`. """

        self.type = None
        """ e.g. driver,  provider,  patient, bank etc..
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.uniqueId = None
        """ Unique identifiers used for system.
        List of `NamingSystemUniqueId` items (represented as `dict` in JSON). """

        self.usage = None
        """ How/where is it used.
        Type `str`. """

        self.useContext = None
        """ The context that the content is intended to support.
        List of `UsageContext` items (represented as `dict` in JSON). """

        super(NamingSystem, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout appears to be (name, json name, type, is list, "of many" group,
        # required) per the generated fhirclient convention — confirm in fhirabstractbase.
        js = super(NamingSystem, self).elementProperties()
        js.extend([
            ("contact", "contact", contactdetail.ContactDetail, True, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, True),
            ("description", "description", str, False, None, False),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("kind", "kind", str, False, None, True),
            ("name", "name", str, False, None, True),
            ("publisher", "publisher", str, False, None, False),
            ("responsible", "responsible", str, False, None, False),
            ("status", "status", str, False, None, True),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
            ("uniqueId", "uniqueId", NamingSystemUniqueId, True, None, True),
            ("usage", "usage", str, False, None, False),
            ("useContext", "useContext", usagecontext.UsageContext, True, None, False),
        ])
        return js
from . import backboneelement
class NamingSystemUniqueId(backboneelement.BackboneElement):
    """ Unique identifiers used for system.

    Indicates how the system may be identified when referenced in electronic
    exchange.
    """

    resource_type = "NamingSystemUniqueId"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.comment = None
        """ Notes about identifier usage.
        Type `str`. """

        self.period = None
        """ When is identifier valid?.
        Type `Period` (represented as `dict` in JSON). """

        self.preferred = None
        """ Is this the id that should be used for this type.
        Type `bool`. """

        self.type = None
        """ oid | uuid | uri | other.
        Type `str`. """

        self.value = None
        """ The unique identifier.
        Type `str`. """

        super(NamingSystemUniqueId, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout appears to be (name, json name, type, is list, "of many" group,
        # required) per the generated fhirclient convention — confirm in fhirabstractbase.
        js = super(NamingSystemUniqueId, self).elementProperties()
        js.extend([
            ("comment", "comment", str, False, None, False),
            ("period", "period", period.Period, False, None, False),
            ("preferred", "preferred", bool, False, None, False),
            ("type", "type", str, False, None, True),
            ("value", "value", str, False, None, True),
        ])
        return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| bsd-3-clause | f60ae961171b1df534300d8169ce2b31 | 34.537143 | 109 | 0.589001 | 4.348951 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/valueset_tests.py | 1 | 36643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import valueset
from .fhirdate import FHIRDate
class ValueSetTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ValueSet", js["resourceType"])
return valueset.ValueSet(js)
def testValueSet1(self):
inst = self.instantiate_from("valueset-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
self.implValueSet1(inst)
js = inst.as_json()
self.assertEqual("ValueSet", js["resourceType"])
inst2 = valueset.ValueSet(js)
self.implValueSet1(inst2)
    def implValueSet1(self, inst):
        """Field-level assertions for valueset-example.json (extensional LOINC cholesterol set)."""
        self.assertTrue(inst.compose.inactive)
        self.assertEqual(inst.compose.include[0].concept[0].code, "14647-2")
        self.assertEqual(inst.compose.include[0].concept[0].display, "Cholesterol [Moles/Volume]")
        self.assertEqual(inst.compose.include[0].concept[1].code, "2093-3")
        self.assertEqual(inst.compose.include[0].concept[1].display, "Cholesterol [Mass/Volume]")
        self.assertEqual(inst.compose.include[0].concept[2].code, "35200-5")
        self.assertEqual(inst.compose.include[0].concept[2].display, "Cholesterol [Mass Or Moles/Volume]")
        self.assertEqual(inst.compose.include[0].concept[3].code, "9342-7")
        self.assertEqual(inst.compose.include[0].concept[3].display, "Cholesterol [Percentile]")
        self.assertEqual(inst.compose.include[0].system, "http://loinc.org")
        self.assertEqual(inst.compose.include[0].version, "2.36")
        self.assertEqual(inst.compose.lockedDate.date, FHIRDate("2012-06-13").date)
        self.assertEqual(inst.compose.lockedDate.as_json(), "2012-06-13")
        self.assertEqual(inst.contact[0].name, "FHIR project team")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.copyright, "This content from LOINC ® is copyright © 1995 Regenstrief Institute, Inc. and the LOINC Committee, and available at no cost under the license at http://loinc.org/terms-of-use.")
        self.assertEqual(inst.date.date, FHIRDate("2015-06-22").date)
        self.assertEqual(inst.date.as_json(), "2015-06-22")
        self.assertEqual(inst.description, "This is an example value set that includes all the LOINC codes for serum/plasma cholesterol from v2.36.")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-extensional")
        self.assertEqual(inst.identifier[0].system, "http://acme.com/identifiers/valuesets")
        self.assertEqual(inst.identifier[0].value, "loinc-cholesterol-int")
        self.assertEqual(inst.jurisdiction[0].coding[0].code, "US")
        self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "LOINC Codes for Cholesterol in Serum/Plasma")
        self.assertEqual(inst.publisher, "HL7 International")
        self.assertEqual(inst.purpose, "This value set was published by ACME Inc in order to make clear which codes are used for Cholesterol by AcmeClinicals (Adult Ambulatory care support in USA)")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-extensional")
        self.assertEqual(inst.useContext[0].code.code, "age")
        self.assertEqual(inst.useContext[0].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
        self.assertEqual(inst.useContext[0].valueQuantity.code, "a")
        self.assertEqual(inst.useContext[0].valueQuantity.comparator, ">")
        self.assertEqual(inst.useContext[0].valueQuantity.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.useContext[0].valueQuantity.unit, "yrs")
        self.assertEqual(inst.useContext[0].valueQuantity.value, 18)
        self.assertEqual(inst.version, "20150622")
def testValueSet2(self):
inst = self.instantiate_from("valueset-example-hierarchical.json")
self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
self.implValueSet2(inst)
js = inst.as_json()
self.assertEqual("ValueSet", js["resourceType"])
inst2 = valueset.ValueSet(js)
self.implValueSet2(inst2)
    def implValueSet2(self, inst):
        """Field-level assertions for valueset-example-hierarchical.json (hierarchical expansion)."""
        self.assertEqual(inst.compose.include[0].concept[0].code, "invalid")
        self.assertEqual(inst.compose.include[0].concept[1].code, "structure")
        self.assertEqual(inst.compose.include[0].concept[2].code, "required")
        self.assertEqual(inst.compose.include[0].concept[3].code, "value")
        self.assertEqual(inst.compose.include[0].concept[4].code, "processing")
        self.assertEqual(inst.compose.include[0].concept[5].code, "duplicate")
        self.assertEqual(inst.compose.include[0].concept[6].code, "not-found")
        self.assertEqual(inst.compose.include[0].concept[7].code, "conflict")
        self.assertEqual(inst.compose.include[0].concept[8].code, "lock")
        self.assertEqual(inst.compose.include[0].concept[9].code, "exception")
        self.assertEqual(inst.compose.include[0].extension[0].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-rules")
        self.assertEqual(inst.compose.include[0].extension[0].valueCode, "groups-only")
        self.assertEqual(inst.compose.include[0].extension[1].extension[0].url, "display")
        self.assertEqual(inst.compose.include[0].extension[1].extension[0].valueString, "(Most common)")
        self.assertEqual(inst.compose.include[0].extension[1].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[1].extension[1].valueCode, "login")
        self.assertEqual(inst.compose.include[0].extension[1].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[1].extension[2].valueCode, "conflict")
        self.assertEqual(inst.compose.include[0].extension[1].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[2].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[2].extension[0].valueString, "processing")
        self.assertEqual(inst.compose.include[0].extension[2].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[2].extension[1].valueCode, "duplicate")
        self.assertEqual(inst.compose.include[0].extension[2].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[2].extension[2].valueCode, "not-found")
        self.assertEqual(inst.compose.include[0].extension[2].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[3].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[3].extension[0].valueString, "invalid")
        self.assertEqual(inst.compose.include[0].extension[3].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[3].extension[1].valueCode, "structure")
        self.assertEqual(inst.compose.include[0].extension[3].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[3].extension[2].valueCode, "required")
        self.assertEqual(inst.compose.include[0].extension[3].extension[3].url, "value")
        self.assertEqual(inst.compose.include[0].extension[3].extension[3].valueCode, "required")
        self.assertEqual(inst.compose.include[0].extension[3].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[4].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[4].extension[0].valueString, "transient")
        self.assertEqual(inst.compose.include[0].extension[4].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[4].extension[1].valueCode, "lock")
        self.assertEqual(inst.compose.include[0].extension[4].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[4].extension[2].valueCode, "exception")
        self.assertEqual(inst.compose.include[0].extension[4].extension[3].url, "value")
        self.assertEqual(inst.compose.include[0].extension[4].extension[3].valueCode, "throttled")
        self.assertEqual(inst.compose.include[0].extension[4].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[5].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[5].extension[0].valueString, "security")
        self.assertEqual(inst.compose.include[0].extension[5].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[5].extension[1].valueCode, "login")
        self.assertEqual(inst.compose.include[0].extension[5].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[5].extension[2].valueCode, "unknown")
        self.assertEqual(inst.compose.include[0].extension[5].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].system, "#hacked")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.contained[0].id, "hacked")
        self.assertEqual(inst.date.date, FHIRDate("2018-07-20").date)
        self.assertEqual(inst.date.as_json(), "2018-07-20")
        self.assertEqual(inst.description, "Demonstration of extensions that build a hierarchical contains")
        self.assertTrue(inst.expansion.contains[0].abstract)
        self.assertEqual(inst.expansion.contains[0].contains[0].code, "login")
        self.assertEqual(inst.expansion.contains[0].contains[0].display, "Login Required")
        self.assertEqual(inst.expansion.contains[0].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[0].contains[1].code, "conflict")
        self.assertEqual(inst.expansion.contains[0].contains[1].display, "Edit Version Conflict")
        self.assertEqual(inst.expansion.contains[0].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[0].display, "(Most common)")
        self.assertEqual(inst.expansion.contains[1].code, "processing")
        self.assertEqual(inst.expansion.contains[1].contains[0].code, "duplicate")
        self.assertEqual(inst.expansion.contains[1].contains[0].display, "Duplicate")
        self.assertEqual(inst.expansion.contains[1].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[1].contains[1].code, "not-found")
        self.assertEqual(inst.expansion.contains[1].contains[1].display, "Not Found")
        self.assertEqual(inst.expansion.contains[1].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[1].display, "Processing Failure")
        self.assertEqual(inst.expansion.contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertTrue(inst.expansion.contains[2].abstract)
        self.assertEqual(inst.expansion.contains[2].code, "invalid")
        self.assertEqual(inst.expansion.contains[2].contains[0].code, "structure")
        self.assertEqual(inst.expansion.contains[2].contains[0].display, "Structural Issue")
        self.assertEqual(inst.expansion.contains[2].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[2].contains[1].code, "required")
        self.assertEqual(inst.expansion.contains[2].contains[1].display, "Required element missing")
        self.assertEqual(inst.expansion.contains[2].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[2].contains[2].code, "value")
        self.assertEqual(inst.expansion.contains[2].contains[2].display, "Element value invalid")
        self.assertEqual(inst.expansion.contains[2].contains[2].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[2].display, "Invalid Content")
        self.assertEqual(inst.expansion.contains[2].system, "http://hl7.org/fhir/hacked")
        self.assertTrue(inst.expansion.contains[3].abstract)
        self.assertEqual(inst.expansion.contains[3].code, "transient")
        self.assertEqual(inst.expansion.contains[3].contains[0].code, "lock-error")
        self.assertEqual(inst.expansion.contains[3].contains[0].display, "Lock Error")
        self.assertEqual(inst.expansion.contains[3].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[3].contains[1].code, "exception")
        self.assertEqual(inst.expansion.contains[3].contains[1].display, "Exception")
        self.assertEqual(inst.expansion.contains[3].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[3].contains[2].code, "throttled")
        self.assertEqual(inst.expansion.contains[3].contains[2].display, "Throttled")
        self.assertEqual(inst.expansion.contains[3].contains[2].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[3].display, "Transient Issue")
        self.assertEqual(inst.expansion.contains[3].system, "http://hl7.org/fhir/hacked")
        self.assertTrue(inst.expansion.contains[4].abstract)
        self.assertEqual(inst.expansion.contains[4].code, "security")
        self.assertEqual(inst.expansion.contains[4].contains[0].code, "login")
        self.assertEqual(inst.expansion.contains[4].contains[0].display, "Login Required")
        self.assertEqual(inst.expansion.contains[4].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[4].contains[1].code, "unknown")
        self.assertEqual(inst.expansion.contains[4].contains[1].display, "Unknown User")
        self.assertEqual(inst.expansion.contains[4].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[4].display, "Security Problem")
        self.assertEqual(inst.expansion.contains[4].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.identifier, "urn:uuid:42316ff8-2714-4680-9980-f37a6d1a71bc")
        self.assertEqual(inst.expansion.parameter[0].name, "excludeNotForUI")
        self.assertEqual(inst.expansion.parameter[0].valueUri, "false")
        self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2018-07-20T23:14:07+10:00").date)
        self.assertEqual(inst.expansion.timestamp.as_json(), "2018-07-20T23:14:07+10:00")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-hierarchical")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "Example Hierarchical ValueSet")
        self.assertEqual(inst.publisher, "FHIR Project team")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-hierarchical")
        self.assertEqual(inst.version, "4.0.0")
def testValueSet3(self):
    """Round-trip valueset-example-expansion.json through the ValueSet model.

    The same expectations (implValueSet3) are checked on the freshly parsed
    instance and again on a copy rebuilt from its JSON serialization, so
    parsing and serialization must both be lossless.
    """
    instance = self.instantiate_from("valueset-example-expansion.json")
    self.assertIsNotNone(instance, "Must have instantiated a ValueSet instance")
    self.implValueSet3(instance)

    serialized = instance.as_json()
    self.assertEqual("ValueSet", serialized["resourceType"])
    self.implValueSet3(valueset.ValueSet(serialized))
def implValueSet3(self, inst):
    """Assert expected field values for the valueset-example-expansion.json fixture.

    Generated expectations for a LOINC serum/plasma cholesterol ValueSet with a
    hierarchical, paged expansion. Called twice by testValueSet3 (parsed and
    round-tripped instance), so the values must survive serialization.
    """
    # compose: intensional definition filtering LOINC by parent code.
    self.assertEqual(inst.compose.include[0].filter[0].op, "=")
    self.assertEqual(inst.compose.include[0].filter[0].property, "parent")
    self.assertEqual(inst.compose.include[0].filter[0].value, "LP43571-6")
    self.assertEqual(inst.compose.include[0].system, "http://loinc.org")
    self.assertEqual(inst.contact[0].telecom[0].system, "url")
    self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
    self.assertEqual(inst.copyright, "This content from LOINC® is copyright © 1995 Regenstrief Institute, Inc. and the LOINC Committee, and available at no cost under the license at http://loinc.org/terms-of-use.")
    self.assertEqual(inst.date.date, FHIRDate("2015-06-22").date)
    self.assertEqual(inst.date.as_json(), "2015-06-22")
    self.assertEqual(inst.description, "This is an example value set that includes all the LOINC codes for serum/plasma cholesterol from v2.36.")
    # expansion: one concrete code plus two abstract grouping nodes with children.
    self.assertEqual(inst.expansion.contains[0].code, "14647-2")
    self.assertEqual(inst.expansion.contains[0].display, "Cholesterol [Moles/volume] in Serum or Plasma")
    self.assertEqual(inst.expansion.contains[0].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[0].version, "2.50")
    self.assertTrue(inst.expansion.contains[1].abstract)
    self.assertEqual(inst.expansion.contains[1].contains[0].code, "2093-3")
    self.assertEqual(inst.expansion.contains[1].contains[0].display, "Cholesterol [Mass/volume] in Serum or Plasma")
    self.assertEqual(inst.expansion.contains[1].contains[0].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[1].contains[0].version, "2.50")
    self.assertEqual(inst.expansion.contains[1].contains[1].code, "48620-9")
    self.assertEqual(inst.expansion.contains[1].contains[1].display, "Cholesterol [Mass/volume] in Serum or Plasma ultracentrifugate")
    self.assertEqual(inst.expansion.contains[1].contains[1].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[1].contains[1].version, "2.50")
    self.assertEqual(inst.expansion.contains[1].contains[2].code, "9342-7")
    self.assertEqual(inst.expansion.contains[1].contains[2].display, "Cholesterol [Percentile]")
    self.assertEqual(inst.expansion.contains[1].contains[2].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[1].contains[2].version, "2.50")
    self.assertEqual(inst.expansion.contains[1].display, "Cholesterol codes")
    self.assertTrue(inst.expansion.contains[2].abstract)
    self.assertEqual(inst.expansion.contains[2].contains[0].code, "2096-6")
    self.assertEqual(inst.expansion.contains[2].contains[0].display, "Cholesterol/Triglyceride [Mass Ratio] in Serum or Plasma")
    self.assertEqual(inst.expansion.contains[2].contains[0].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[2].contains[0].version, "2.50")
    self.assertEqual(inst.expansion.contains[2].contains[1].code, "35200-5")
    self.assertEqual(inst.expansion.contains[2].contains[1].display, "Cholesterol/Triglyceride [Mass Ratio] in Serum or Plasma")
    self.assertEqual(inst.expansion.contains[2].contains[1].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[2].contains[1].version, "2.50")
    self.assertEqual(inst.expansion.contains[2].contains[2].code, "48089-7")
    self.assertEqual(inst.expansion.contains[2].contains[2].display, "Cholesterol/Apolipoprotein B [Molar ratio] in Serum or Plasma")
    self.assertEqual(inst.expansion.contains[2].contains[2].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[2].contains[2].version, "2.50")
    self.assertEqual(inst.expansion.contains[2].contains[3].code, "55838-7")
    self.assertEqual(inst.expansion.contains[2].contains[3].display, "Cholesterol/Phospholipid [Molar ratio] in Serum or Plasma")
    self.assertEqual(inst.expansion.contains[2].contains[3].system, "http://loinc.org")
    self.assertEqual(inst.expansion.contains[2].contains[3].version, "2.50")
    self.assertEqual(inst.expansion.contains[2].display, "Cholesterol Ratios")
    self.assertEqual(inst.expansion.extension[0].url, "http://hl7.org/fhir/StructureDefinition/valueset-expansionSource")
    self.assertEqual(inst.expansion.extension[0].valueUri, "http://hl7.org/fhir/ValueSet/example-extensional")
    self.assertEqual(inst.expansion.identifier, "urn:uuid:42316ff8-2714-4680-9980-f37a6d1a71bc")
    self.assertEqual(inst.expansion.offset, 0)
    self.assertEqual(inst.expansion.parameter[0].name, "version")
    self.assertEqual(inst.expansion.parameter[0].valueString, "2.50")
    self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2015-06-22T13:56:07Z").date)
    self.assertEqual(inst.expansion.timestamp.as_json(), "2015-06-22T13:56:07Z")
    self.assertEqual(inst.expansion.total, 8)
    self.assertTrue(inst.experimental)
    self.assertEqual(inst.id, "example-expansion")
    self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
    self.assertEqual(inst.name, "LOINC Codes for Cholesterol in Serum/Plasma")
    self.assertEqual(inst.publisher, "FHIR Project team")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-expansion")
    self.assertEqual(inst.version, "20150622")
def testValueSet4(self):
    """Round-trip valueset-example-inactive.json through the ValueSet model.

    Runs implValueSet4 against the parsed instance and against a second
    instance rebuilt from the first one's JSON output.
    """
    instance = self.instantiate_from("valueset-example-inactive.json")
    self.assertIsNotNone(instance, "Must have instantiated a ValueSet instance")
    self.implValueSet4(instance)

    serialized = instance.as_json()
    self.assertEqual("ValueSet", serialized["resourceType"])
    self.implValueSet4(valueset.ValueSet(serialized))
def implValueSet4(self, inst):
    """Assert expected field values for the valueset-example-inactive.json fixture.

    Expectations for an HL7 v3 ActMood ValueSet that deliberately includes
    inactive codes (``compose.inactive`` / ``contains[0].inactive``).
    """
    self.assertTrue(inst.compose.inactive)
    self.assertEqual(inst.compose.include[0].filter[0].op, "descendent-of")
    self.assertEqual(inst.compose.include[0].filter[0].property, "concept")
    self.assertEqual(inst.compose.include[0].filter[0].value, "_ActMoodPredicate")
    self.assertEqual(inst.compose.include[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
    self.assertEqual(inst.description, "HL7 v3 ActMood Predicate codes, including inactive codes")
    # First expansion entry is the inactive "criterion" code.
    self.assertEqual(inst.expansion.contains[0].code, "CRT")
    self.assertEqual(inst.expansion.contains[0].display, "criterion")
    self.assertTrue(inst.expansion.contains[0].inactive)
    self.assertEqual(inst.expansion.contains[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
    self.assertEqual(inst.expansion.contains[1].code, "EXPEC")
    self.assertEqual(inst.expansion.contains[1].contains[0].code, "GOL")
    self.assertEqual(inst.expansion.contains[1].contains[0].display, "goal")
    self.assertEqual(inst.expansion.contains[1].contains[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
    self.assertEqual(inst.expansion.contains[1].contains[1].code, "RSK")
    self.assertEqual(inst.expansion.contains[1].contains[1].display, "risk")
    self.assertEqual(inst.expansion.contains[1].contains[1].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
    self.assertEqual(inst.expansion.contains[1].display, "expectation")
    self.assertEqual(inst.expansion.contains[1].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
    self.assertEqual(inst.expansion.contains[2].code, "OPT")
    self.assertEqual(inst.expansion.contains[2].display, "option")
    self.assertEqual(inst.expansion.contains[2].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
    self.assertEqual(inst.expansion.identifier, "urn:uuid:46c00b3f-003a-4f31-9d4b-ea2de58b2a99")
    self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2017-02-26T10:00:00Z").date)
    self.assertEqual(inst.expansion.timestamp.as_json(), "2017-02-26T10:00:00Z")
    self.assertEqual(inst.id, "inactive")
    self.assertEqual(inst.name, "Example-inactive")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.title, "Example with inactive codes")
    self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/inactive")
    self.assertEqual(inst.version, "4.0.0")
def testValueSet5(self):
    """Round-trip valueset-example-filter.json through the ValueSet model.

    Runs implValueSet5 on the parsed instance and on a JSON-rebuilt copy.
    """
    instance = self.instantiate_from("valueset-example-filter.json")
    self.assertIsNotNone(instance, "Must have instantiated a ValueSet instance")
    self.implValueSet5(instance)

    serialized = instance.as_json()
    self.assertEqual("ValueSet", serialized["resourceType"])
    self.implValueSet5(valueset.ValueSet(serialized))
def implValueSet5(self, inst):
    """Assert expected field values for the valueset-example-filter.json fixture.

    Expectations for an ACME cholesterol ValueSet built from a CodeSystem-defined
    filter property (``acme-plasma``).
    """
    self.assertEqual(inst.compose.include[0].filter[0].op, "=")
    self.assertEqual(inst.compose.include[0].filter[0].property, "acme-plasma")
    self.assertEqual(inst.compose.include[0].filter[0].value, "true")
    self.assertEqual(inst.compose.include[0].system, "http://hl7.org/fhir/CodeSystem/example")
    self.assertEqual(inst.contact[0].name, "FHIR project team")
    self.assertEqual(inst.contact[0].telecom[0].system, "url")
    self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
    self.assertEqual(inst.date.date, FHIRDate("2018-11-01").date)
    self.assertEqual(inst.date.as_json(), "2018-11-01")
    self.assertEqual(inst.description, "ACME Codes for Cholesterol: Plasma only - demonstrating the use of a filter defined in a CodeSystem")
    self.assertTrue(inst.experimental)
    self.assertEqual(inst.id, "example-filter")
    self.assertEqual(inst.name, "ACMECholCodesPlasma")
    self.assertEqual(inst.publisher, "HL7 International")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.title, "ACME Codes for Cholesterol: Plasma only")
    self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-filter")
    self.assertEqual(inst.version, "4.0.0")
def testValueSet6(self):
    """Round-trip valueset-example-yesnodontknow.json through the ValueSet model.

    Runs implValueSet6 on the parsed instance and on a JSON-rebuilt copy.
    """
    instance = self.instantiate_from("valueset-example-yesnodontknow.json")
    self.assertIsNotNone(instance, "Must have instantiated a ValueSet instance")
    self.implValueSet6(instance)

    serialized = instance.as_json()
    self.assertEqual("ValueSet", serialized["resourceType"])
    self.implValueSet6(valueset.ValueSet(serialized))
def implValueSet6(self, inst):
    """Assert expected field values for the valueset-example-yesnodontknow.json fixture.

    Expectations for the yes/no/don't-know answer ValueSet, composed from the
    v2-0136 table plus one data-absent-reason concept.
    """
    self.assertEqual(inst.compose.include[0].valueSet[0], "http://terminology.hl7.org/ValueSet/v2-0136")
    self.assertEqual(inst.compose.include[1].concept[0].code, "asked-unknown")
    self.assertEqual(inst.compose.include[1].concept[0].display, "Don't know")
    self.assertEqual(inst.compose.include[1].system, "http://terminology.hl7.org/CodeSystem/data-absent-reason")
    self.assertEqual(inst.description, "For Capturing simple yes-no-don't know answers")
    self.assertEqual(inst.expansion.contains[0].code, "Y")
    self.assertEqual(inst.expansion.contains[0].display, "Yes")
    self.assertEqual(inst.expansion.contains[0].system, "http://terminology.hl7.org/CodeSystem/v2-0136")
    self.assertEqual(inst.expansion.contains[1].code, "N")
    self.assertEqual(inst.expansion.contains[1].display, "No")
    self.assertEqual(inst.expansion.contains[1].system, "http://terminology.hl7.org/CodeSystem/v2-0136")
    self.assertEqual(inst.expansion.contains[2].code, "asked-unknown")
    self.assertEqual(inst.expansion.contains[2].display, "Don't know")
    self.assertEqual(inst.expansion.contains[2].system, "http://terminology.hl7.org/CodeSystem/data-absent-reason")
    self.assertEqual(inst.expansion.identifier, "urn:uuid:bf99fe50-2c2b-41ad-bd63-bee6919810b4")
    self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2015-07-14T10:00:00Z").date)
    self.assertEqual(inst.expansion.timestamp.as_json(), "2015-07-14T10:00:00Z")
    self.assertEqual(inst.id, "yesnodontknow")
    self.assertEqual(inst.name, "Yes/No/Don't Know")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/yesnodontknow")
    self.assertEqual(inst.version, "4.0.0")
def testValueSet7(self):
    """Round-trip valueset-examplescenario-actor-type.json through the ValueSet model.

    Runs implValueSet7 on the parsed instance and on a JSON-rebuilt copy.
    """
    instance = self.instantiate_from("valueset-examplescenario-actor-type.json")
    self.assertIsNotNone(instance, "Must have instantiated a ValueSet instance")
    self.implValueSet7(instance)

    serialized = instance.as_json()
    self.assertEqual("ValueSet", serialized["resourceType"])
    self.implValueSet7(valueset.ValueSet(serialized))
def implValueSet7(self, inst):
    """Assert expected field values for the valueset-examplescenario-actor-type.json fixture.

    Expectations for the core ExampleScenarioActorType ValueSet, including its
    structuredefinition-* extensions and metadata.
    """
    self.assertEqual(inst.compose.include[0].system, "http://hl7.org/fhir/examplescenario-actor-type")
    self.assertEqual(inst.contact[0].telecom[0].system, "url")
    self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
    self.assertEqual(inst.contact[0].telecom[1].system, "email")
    self.assertEqual(inst.contact[0].telecom[1].value, "fhir@lists.hl7.org")
    self.assertEqual(inst.date.date, FHIRDate("2018-12-27T22:37:54+11:00").date)
    self.assertEqual(inst.date.as_json(), "2018-12-27T22:37:54+11:00")
    self.assertEqual(inst.description, "The type of actor - system or human.")
    self.assertFalse(inst.experimental)
    # Workgroup / maturity extensions attached by the FHIR build tooling.
    self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-wg")
    self.assertEqual(inst.extension[0].valueCode, "fhir")
    self.assertEqual(inst.extension[1].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status")
    self.assertEqual(inst.extension[1].valueCode, "trial-use")
    self.assertEqual(inst.extension[2].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm")
    self.assertEqual(inst.extension[2].valueInteger, 0)
    self.assertEqual(inst.id, "examplescenario-actor-type")
    self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
    self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.840.1.113883.4.642.3.858")
    self.assertTrue(inst.immutable)
    self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2018-12-27T22:37:54.724+11:00").date)
    self.assertEqual(inst.meta.lastUpdated.as_json(), "2018-12-27T22:37:54.724+11:00")
    self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
    self.assertEqual(inst.name, "ExampleScenarioActorType")
    self.assertEqual(inst.publisher, "HL7 (FHIR Project)")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.title, "ExampleScenarioActorType")
    self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/examplescenario-actor-type")
    self.assertEqual(inst.version, "4.0.0")
def testValueSet8(self):
    """Round-trip valueset-list-example-codes.json through the ValueSet model.

    Runs implValueSet8 on the parsed instance and on a JSON-rebuilt copy.
    """
    instance = self.instantiate_from("valueset-list-example-codes.json")
    self.assertIsNotNone(instance, "Must have instantiated a ValueSet instance")
    self.implValueSet8(instance)

    serialized = instance.as_json()
    self.assertEqual("ValueSet", serialized["resourceType"])
    self.implValueSet8(valueset.ValueSet(serialized))
def implValueSet8(self, inst):
    """Assert expected field values for the valueset-list-example-codes.json fixture.

    Expectations for the "Example Use Codes for List" ValueSet, including its
    structuredefinition-* extensions and metadata.
    """
    self.assertEqual(inst.compose.include[0].system, "http://terminology.hl7.org/CodeSystem/list-example-use-codes")
    self.assertEqual(inst.contact[0].telecom[0].system, "url")
    self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
    self.assertEqual(inst.date.date, FHIRDate("2018-12-27T22:37:54+11:00").date)
    self.assertEqual(inst.date.as_json(), "2018-12-27T22:37:54+11:00")
    self.assertEqual(inst.description, "Example use codes for the List resource - typical kinds of use.")
    self.assertFalse(inst.experimental)
    # Workgroup / maturity extensions attached by the FHIR build tooling.
    self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-wg")
    self.assertEqual(inst.extension[0].valueCode, "fhir")
    self.assertEqual(inst.extension[1].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status")
    self.assertEqual(inst.extension[1].valueCode, "draft")
    self.assertEqual(inst.extension[2].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm")
    self.assertEqual(inst.extension[2].valueInteger, 1)
    self.assertEqual(inst.id, "list-example-codes")
    self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
    self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.840.1.113883.4.642.3.316")
    self.assertTrue(inst.immutable)
    self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2018-12-27T22:37:54.724+11:00").date)
    self.assertEqual(inst.meta.lastUpdated.as_json(), "2018-12-27T22:37:54.724+11:00")
    self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
    self.assertEqual(inst.name, "ExampleUseCodesForList")
    self.assertEqual(inst.publisher, "FHIR Project")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.title, "Example Use Codes for List")
    self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/list-example-codes")
    self.assertEqual(inst.version, "4.0.0")
def testValueSet9(self):
    """Round-trip valueset-example-intensional.json through the ValueSet model.

    Runs implValueSet9 on the parsed instance and on a JSON-rebuilt copy.
    """
    instance = self.instantiate_from("valueset-example-intensional.json")
    self.assertIsNotNone(instance, "Must have instantiated a ValueSet instance")
    self.implValueSet9(instance)

    serialized = instance.as_json()
    self.assertEqual("ValueSet", serialized["resourceType"])
    self.implValueSet9(valueset.ValueSet(serialized))
def implValueSet9(self, inst):
    """Assert expected field values for the valueset-example-intensional.json fixture.

    Expectations for an intensionally-defined LOINC cholesterol ValueSet that
    both includes by filter and explicitly excludes one concept.
    """
    # Explicit exclusion of one test-strip code from the filtered include.
    self.assertEqual(inst.compose.exclude[0].concept[0].code, "5932-9")
    self.assertEqual(inst.compose.exclude[0].concept[0].display, "Cholesterol [Presence] in Blood by Test strip")
    self.assertEqual(inst.compose.exclude[0].system, "http://loinc.org")
    self.assertEqual(inst.compose.include[0].filter[0].op, "=")
    self.assertEqual(inst.compose.include[0].filter[0].property, "parent")
    self.assertEqual(inst.compose.include[0].filter[0].value, "LP43571-6")
    self.assertEqual(inst.compose.include[0].system, "http://loinc.org")
    self.assertEqual(inst.contact[0].name, "FHIR project team")
    self.assertEqual(inst.contact[0].telecom[0].system, "url")
    self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
    self.assertEqual(inst.copyright, "This content from LOINC® is copyright © 1995 Regenstrief Institute, Inc. and the LOINC Committee, and available at no cost under the license at http://loinc.org/terms-of-use")
    self.assertEqual(inst.date.date, FHIRDate("2015-06-22").date)
    self.assertEqual(inst.date.as_json(), "2015-06-22")
    self.assertEqual(inst.description, "This is an example value set that includes all the LOINC codes for serum/plasma cholesterol from v2.36.")
    self.assertTrue(inst.experimental)
    self.assertEqual(inst.id, "example-intensional")
    self.assertEqual(inst.identifier[0].system, "http://acme.com/identifiers/valuesets")
    self.assertEqual(inst.identifier[0].value, "loinc-cholesterol-ext")
    self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
    self.assertEqual(inst.name, "LOINC Codes for Cholesterol in Serum/Plasma")
    self.assertEqual(inst.publisher, "HL7 International")
    self.assertEqual(inst.status, "draft")
    self.assertEqual(inst.text.status, "generated")
    self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-intensional")
    self.assertEqual(inst.version, "20150622")
| bsd-3-clause | a4da61e79552a28de702919ca98d627b | 69.998062 | 219 | 0.699058 | 3.424472 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/medicationstatement.py | 1 | 8902 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/MedicationStatement) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
# Generated FHIR STU3 resource model (see file header); keep logic in sync with
# the generator rather than hand-editing behavior.
class MedicationStatement(domainresource.DomainResource):
    """ Record of medication being taken by a patient.

    A record of a medication that is being consumed by a patient.   A
    MedicationStatement may indicate that the patient may be taking the
    medication now, or has taken the medication in the past or will be taking
    the medication in the future.  The source of this information can be the
    patient, significant other (such as a family member or spouse), or a
    clinician.  A common scenario where this information is captured is during
    the history taking process during a patient visit or stay.   The medication
    information may come from sources such as the patient's memory, from a
    prescription bottle,  or from a list of medications the patient, clinician
    or other party maintains

    The primary difference between a medication statement and a medication
    administration is that the medication administration has complete
    administration information and is based on actual administration
    information from the person who administered the medication.  A medication
    statement is often, if not always, less specific.  There is no required
    date/time when the medication was administered, in fact we only know that a
    source has reported the patient is taking this medication, where details
    such as time, quantity, or rate or even medication product may be
    incomplete or missing or less precise.  As stated earlier, the medication
    statement information may come from the patient's memory, from a
    prescription bottle or from a list of medications the patient, clinician or
    other party maintains.  Medication administration is more formal and is not
    missing detailed information.
    """

    # JSON "resourceType" value used by the base class for (de)serialization.
    resource_type = "MedicationStatement"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Each attribute below mirrors one FHIR element; the bare-string
        # "docstrings" that follow each assignment are emitted by the generator.

        self.basedOn = None
        """ Fulfils plan, proposal or order.
        List of `FHIRReference` items referencing `MedicationRequest, CarePlan, ProcedureRequest, ReferralRequest` (represented as `dict` in JSON). """

        self.category = None
        """ Type of medication usage.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.context = None
        """ Encounter / Episode associated with MedicationStatement.
        Type `FHIRReference` referencing `Encounter, EpisodeOfCare` (represented as `dict` in JSON). """

        self.dateAsserted = None
        """ When the statement was asserted?.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.derivedFrom = None
        """ Additional supporting information.
        List of `FHIRReference` items referencing `Resource` (represented as `dict` in JSON). """

        self.dosage = None
        """ Details of how medication is/was taken or should be taken.
        List of `Dosage` items (represented as `dict` in JSON). """

        self.effectiveDateTime = None
        """ The date/time or interval when the medication was taken.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.effectivePeriod = None
        """ The date/time or interval when the medication was taken.
        Type `Period` (represented as `dict` in JSON). """

        self.identifier = None
        """ External identifier.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.informationSource = None
        """ Person or organization that provided the information about the
        taking of this medication.
        Type `FHIRReference` referencing `Patient, Practitioner, RelatedPerson, Organization` (represented as `dict` in JSON). """

        self.medicationCodeableConcept = None
        """ What medication was taken.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.medicationReference = None
        """ What medication was taken.
        Type `FHIRReference` referencing `Medication` (represented as `dict` in JSON). """

        self.note = None
        """ Further information about the statement.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.partOf = None
        """ Part of referenced event.
        List of `FHIRReference` items referencing `MedicationAdministration, MedicationDispense, MedicationStatement, Procedure, Observation` (represented as `dict` in JSON). """

        self.reasonCode = None
        """ Reason for why the medication is being/was taken.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.reasonNotTaken = None
        """ True if asserting medication was not given.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.reasonReference = None
        """ Condition or observation that supports why the medication is
        being/was taken.
        List of `FHIRReference` items referencing `Condition, Observation` (represented as `dict` in JSON). """

        self.status = None
        """ active | completed | entered-in-error | intended | stopped | on-
        hold.
        Type `str`. """

        self.subject = None
        """ Who is/was taking the medication.
        Type `FHIRReference` referencing `Patient, Group` (represented as `dict` in JSON). """

        self.taken = None
        """ y | n | unk | na.
        Type `str`. """

        super(MedicationStatement, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property table consumed by the base class for (de)serialization.
        # Tuple columns appear to be: (attribute name, JSON name, type,
        # is_list, "one of many" group key or None, required) — matches the
        # fhirclient convention; confirm against FHIRAbstractBase if changing.
        js = super(MedicationStatement, self).elementProperties()
        js.extend([
            ("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
            ("category", "category", codeableconcept.CodeableConcept, False, None, False),
            ("context", "context", fhirreference.FHIRReference, False, None, False),
            ("dateAsserted", "dateAsserted", fhirdate.FHIRDate, False, None, False),
            ("derivedFrom", "derivedFrom", fhirreference.FHIRReference, True, None, False),
            ("dosage", "dosage", dosage.Dosage, True, None, False),
            ("effectiveDateTime", "effectiveDateTime", fhirdate.FHIRDate, False, "effective", False),
            ("effectivePeriod", "effectivePeriod", period.Period, False, "effective", False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("informationSource", "informationSource", fhirreference.FHIRReference, False, None, False),
            ("medicationCodeableConcept", "medicationCodeableConcept", codeableconcept.CodeableConcept, False, "medication", True),
            ("medicationReference", "medicationReference", fhirreference.FHIRReference, False, "medication", True),
            ("note", "note", annotation.Annotation, True, None, False),
            ("partOf", "partOf", fhirreference.FHIRReference, True, None, False),
            ("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
            ("reasonNotTaken", "reasonNotTaken", codeableconcept.CodeableConcept, True, None, False),
            ("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
            ("taken", "taken", str, False, None, True),
        ])
        return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| bsd-3-clause | a0a03679502014baa09dc6e3b5205d4f | 46.100529 | 178 | 0.656819 | 4.342439 | false | false | false | false |
django/django-localflavor | localflavor/md/validators.py | 3 | 3143 | import re
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _
from .choices import (LICENSE_PLATE_DIPLOMATIC, LICENSE_PLATE_GOVERNMENT_TYPE, LICENSE_PLATE_POLICE,
REGION_CHOICES_2002_2015)
class MDIDNOFieldValidator(RegexValidator):
    """
    Validator for a Moldavian IDNO: exactly thirteen digits.

    .. versionadded:: 2.1
    """

    regex = r'^\d{13}$'
    error_message = _('Enter a valid IDNO number.')
    message = error_message
class MDLicensePlateValidator(RegexValidator):
    """
    Validation for `Moldavian License Plates`_.

    A value is valid if it matches any one of the supported plate formats
    (old 2002-2015 regional series, current series, presidential,
    diplomatic, police, foreign, state-security or government plates).

    .. _Moldavian License Plates: https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Moldova

    .. versionadded:: 2.1
    """

    error_message = _('Enter a valid license plate.')
    # NOTE(review): ``__call__`` is overridden below, so the RegexValidator
    # matching machinery never runs; ``regex``/``message`` are kept only so
    # the class attribute interface stays unchanged (the 13-digit pattern
    # looks copied from MDIDNOFieldValidator and is not what is matched).
    regex = r'^\d{13}$'
    message = error_message

    def __call__(self, value):
        """Validate ``value`` (case-insensitively) against all known formats.

        :raises ValidationError: if no plate format matches.
        """
        value = value.upper()
        if not self._is_valid(value):
            raise ValidationError(self.error_message, code='invalid')

    def _is_valid(self, value):
        # Valid when at least one format checker accepts the value.
        return any([
            self._is_old_format(value),
            self._is_new_format(value),
            self._is_president_format(value),
            self._is_diplomatic_format(value),
            self._is_police_format(value),
            self._is_foreign_format(value),
            self._is_state_security_format(value),
            self._is_gov_format(value)
        ])

    @staticmethod
    def _is_old_format(value):
        # 2002-2015 series: "<region code> AB 123".
        # Fully anchored (^...$) for consistency with every other checker in
        # this class; the previously unanchored pattern accepted values with
        # trailing characters (e.g. "C AB 1234" matched via the "123" prefix).
        regions = "|".join([code for code, desc in REGION_CHOICES_2002_2015])
        pattern = r'^({regions}) [A-Z]{{2}} \d{{1,3}}$'.format(regions=regions)
        return re.match(pattern, value) is not None

    @staticmethod
    def _is_new_format(value):
        # Current series: "ABC 123" — but police prefixes are handled by
        # _is_police_format, so values containing one are rejected here.
        if not any(x in value for x, y in LICENSE_PLATE_POLICE):
            pattern = r'^[A-Z]{3} \d{1,3}$'
            return re.match(pattern, value) is not None
        return False

    @staticmethod
    def _is_gov_format(value):
        # Government plates: "RM <type> 123".
        types = "|".join([code for code, desc in LICENSE_PLATE_GOVERNMENT_TYPE])
        pattern = r'^RM ({types}) \d{{3}}$'.format(types=types)
        return re.match(pattern, value) is not None

    @staticmethod
    def _is_diplomatic_format(value):
        # Diplomatic plates: "<type> 123 A" or "<type> 123 AA".
        types = "|".join([code for code, desc in LICENSE_PLATE_DIPLOMATIC])
        pattern = r'^({types}) \d{{3}} A{{1,2}}$'.format(types=types)
        return re.match(pattern, value) is not None

    @staticmethod
    def _is_police_format(value):
        # Police plates: "<police code> 1234".
        types = "|".join([code for code, desc in LICENSE_PLATE_POLICE])
        gov_format = r'^({types}) \d{{4}}$'.format(types=types)
        return re.match(gov_format, value) is not None

    @staticmethod
    def _is_president_format(value):
        # Presidential plates: "RM 1234".
        pattern = r'^RM \d{4}$'
        return re.match(pattern, value) is not None

    @staticmethod
    def _is_state_security_format(value):
        # State security plates: "SP 123".
        pattern = r'^SP \d{3}$'
        return re.match(pattern, value) is not None

    @staticmethod
    def _is_foreign_format(value):
        # Foreign-owned vehicle plates: "H 1234".
        pattern = r'^H \d{4}$'
        return re.match(pattern, value) is not None
| bsd-3-clause | b6245de5fdfb1d57905a8cc565c3af7d | 31.739583 | 102 | 0.611836 | 3.488346 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/deviceusestatement.py | 1 | 3846 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/DeviceUseStatement) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
# Generated FHIR DSTU2 resource model (see file header). The DSTU2 source
# left most element short-descriptions empty, which is why the generator
# emitted the literal "None." field docstrings below; they are preserved
# verbatim — the `Type .../List of ...` second lines carry the real info.
class DeviceUseStatement(domainresource.DomainResource):
    """ None.

    A record of a device being used by a patient where the record is the result
    of a report from the patient or another clinician.
    """

    # JSON "resourceType" value used by the base class for (de)serialization.
    resource_name = "DeviceUseStatement"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.bodySiteCodeableConcept = None
        """ Target body site.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.bodySiteReference = None
        """ Target body site.
        Type `FHIRReference` referencing `BodySite` (represented as `dict` in JSON). """

        self.device = None
        """ None.
        Type `FHIRReference` referencing `Device` (represented as `dict` in JSON). """

        self.identifier = None
        """ None.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.indication = None
        """ None.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.notes = None
        """ None.
        List of `str` items. """

        self.recordedOn = None
        """ None.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.subject = None
        """ None.
        Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """

        self.timingDateTime = None
        """ None.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.timingPeriod = None
        """ None.
        Type `Period` (represented as `dict` in JSON). """

        self.timingTiming = None
        """ None.
        Type `Timing` (represented as `dict` in JSON). """

        self.whenUsed = None
        """ None.
        Type `Period` (represented as `dict` in JSON). """

        super(DeviceUseStatement, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property table consumed by the base class for (de)serialization.
        # Tuple columns appear to be: (attribute name, JSON name, type,
        # is_list, "one of many" group key or None, required) — matches the
        # fhirclient convention; confirm against FHIRAbstractBase if changing.
        js = super(DeviceUseStatement, self).elementProperties()
        js.extend([
            ("bodySiteCodeableConcept", "bodySiteCodeableConcept", codeableconcept.CodeableConcept, False, "bodySite", False),
            ("bodySiteReference", "bodySiteReference", fhirreference.FHIRReference, False, "bodySite", False),
            ("device", "device", fhirreference.FHIRReference, False, None, True),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("indication", "indication", codeableconcept.CodeableConcept, True, None, False),
            ("notes", "notes", str, True, None, False),
            ("recordedOn", "recordedOn", fhirdate.FHIRDate, False, None, False),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
            ("timingDateTime", "timingDateTime", fhirdate.FHIRDate, False, "timing", False),
            ("timingPeriod", "timingPeriod", period.Period, False, "timing", False),
            ("timingTiming", "timingTiming", timing.Timing, False, "timing", False),
            ("whenUsed", "whenUsed", period.Period, False, None, False),
        ])
        return js
from . import codeableconcept
from . import fhirdate
from . import fhirreference
from . import identifier
from . import period
from . import timing
| bsd-3-clause | f6efa0df181a40fa14f706721f58b580 | 37.079208 | 126 | 0.599324 | 4.189542 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/3e991a30426b_adding_pipeline_id.py | 1 | 2129 | """adding_pipeline_id
Revision ID: 3e991a30426b
Revises: 4fa9a3846491, 88ea1bb98358
Create Date: 2021-10-20 15:17:12.729113
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '3e991a30426b'
# Tuple of parents: this is a merge revision joining two migration branches.
down_revision = ('4fa9a3846491', '88ea1bb98358')
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, e.g. ``upgrade_rdr``."""
    handler = globals()["upgrade_" + engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. ``downgrade_rdr``."""
    handler = globals()["downgrade_" + engine_name]
    handler()
def upgrade_rdr():
    """Add the nullable pipeline_id column to both genomic tables in the rdr DB."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('genomic_gc_validation_metrics', sa.Column('pipeline_id', sa.String(length=255), nullable=True))
    op.add_column('genomic_aw2_raw', sa.Column('pipeline_id', sa.String(length=255), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the pipeline_id column added by upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('genomic_gc_validation_metrics', 'pipeline_id')
    op.drop_column('genomic_aw2_raw', 'pipeline_id')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | a363cc4d286157c48de1a87bacc864fc | 33.901639 | 125 | 0.746829 | 3.467427 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/tools/ehr_upload.py | 1 | 5475 | """Get site/participant info for a given organization to fetch consent forms from
ptc-uploads-all-of-us-rdr-prod (the default location for new participant uploads from PTC.
Syncs consent files to a new organization bucket by the same name in the Awardees consent bucket
i.e.aou179/hpo-site-xxx/Participant/<consent files>
To run: tools/ehr_upload.sh --account <pmi ops account> --project all-of-us-rdr-prod
"""
import io
import csv
import logging
import shlex
import subprocess
import urllib.request, urllib.error, urllib.parse
from sqlalchemy import bindparam, text
from rdr_service.dao import database_factory
from rdr_service.main_util import configure_logging, get_parser
# Default GCS bucket that PTC uploads new participant consents into.
SOURCE_BUCKET = "ptc-uploads-all-of-us-rdr-prod"
# Column headers expected in the HPO report spreadsheet.
BUCKET_NAME = "Bucket Name"
AGGREGATING_ORG_ID = "Aggregating Org ID"
ORG_ID = "Org ID"
ORG_STATUS = "Org Status"
def get_sql(organization):
    """Build the two participant queries for *organization*.

    Returns a tuple ``(site_pairing_sql, no_site_pairing_sql)``.  Both queries
    select participants who have consented to study enrollment and EHR
    sharing; the first returns (participant_id, google_group) for participants
    paired to a site, the second returns participant_id for participants with
    no site pairing.
    """
    site_pairing_sql = text(
        """
     select p.participant_id, google_group from participant p left join site s on
     p.site_id = s.site_id
     left join participant_summary summary on p.participant_id = summary.participant_id
     where p.organization_id in (select organization_id from organization where external_id = :org)
     and s.google_group is not null
     and summary.consent_for_electronic_health_records = 1
     and summary.consent_for_study_enrollment = 1
        """,
        bindparams=[bindparam("org", value=organization)],
    )
    no_site_pairing_sql = text(
        """
        select p.participant_id from participant p
        left join participant_summary summary on p.participant_id = summary.participant_id
        where p.site_id is NULL and p.organization_id in (
        select organization_id from organization where external_id = :org)
        and summary.consent_for_electronic_health_records = 1
        and summary.consent_for_study_enrollment = 1;
        """,
        bindparams=[bindparam("org", value=organization)],
    )
    return site_pairing_sql, no_site_pairing_sql
def _fetch_csv_data(file_url):
    """Download *file_url* and return a ``csv.DictReader`` over its rows.

    ``urllib.request.urlopen(...).read()`` returns ``bytes`` in Python 3, so
    the payload must be decoded before it can be wrapped in a ``StringIO``
    for the csv module (passing bytes to ``io.StringIO`` raises TypeError).
    """
    response = urllib.request.urlopen(file_url)
    return csv.DictReader(io.StringIO(response.read().decode("utf-8")))
def _ensure_buckets(hpo_data):
"""Some organizations aggregating org ID is responsible for consent verification. This is a
parent/child relationship(not seen elsewhere in the RDR). If bucket name is blank it is safe to
assume the parent org (aggregate org id) bucket is to be used."""
for _, _dict in list(hpo_data.items()):
if _dict["bucket"] == "":
parent = _dict["aggregate_id"]
_dict["bucket"] = hpo_data[parent]["bucket"]
def read_hpo_report(csv_reader):
    """Build a mapping of active org id -> {aggregate_id, bucket} from report rows.

    Rows whose status column is not "Active" are skipped; blank buckets are
    then filled in from the aggregating org via _ensure_buckets.
    """
    hpo_data = {}
    for row in csv_reader:
        if row.get(ORG_STATUS) != "Active":
            continue
        hpo_data[row.get(ORG_ID)] = {
            "aggregate_id": row.get(AGGREGATING_ORG_ID),
            "bucket": row.get(BUCKET_NAME),
        }
    _ensure_buckets(hpo_data)
    return hpo_data
def sync_ehr_consents(spreadsheet_id):
    """Read the HPO report spreadsheet and rsync consent files for each active org."""
    file_url = "https://docs.google.com/spreadsheets/d/%(id)s/export?format=csv&id=%(id)s&gid=%(" "gid)s" % {
        "id": spreadsheet_id,
        "gid": "0",
    }
    csv_reader = _fetch_csv_data(file_url)
    # NOTE(review): DictReader already consumes the header row for its field
    # names, so this next() discards the first *data* row -- confirm the sheet
    # really has a secondary header/summary row that should be skipped.
    next(csv_reader)
    hpo_data = read_hpo_report(csv_reader)
    logging.info("Reading data complete, beginning sync...")
    for org, data in list(hpo_data.items()):
        site_paired_sql, no_paired_sql = get_sql(org)
        logging.info("syncing participants for {}".format(org))
        run_sql(data["bucket"], site_paired_sql, no_paired_sql)
def run_gsutil(gsutil):
    """Execute a gsutil command string and block until it finishes.

    The exit status and output are not checked; the sync is best-effort.
    """
    system_call = subprocess.Popen(shlex.split(gsutil))
    system_call.communicate()[0]
def _participant_rsync_command(destination_bucket, group_dir, participant_id):
    """Build the gsutil command that syncs one participant's consent files.

    Copies from the PTC upload bucket into
    gs://<destination_bucket>/Participant/<group_dir>/P<participant_id>/.
    """
    return "gsutil -m rsync gs://%s/Participant/P%s/* gs://%s/Participant/%s/P%s/" % (
        SOURCE_BUCKET,
        participant_id,
        destination_bucket,
        group_dir,
        participant_id,
    )


def run_sql(destination_bucket, site_pairing_sql, no_site_pairing_sql):
    """Rsync consent files for all consented participants of one organization.

    Participants paired to a site are copied under that site's google_group
    folder; participants with no site pairing go under "no_site_pairing".
    The previous implementation built the same gsutil command twice with long
    string concatenations; that is now factored into one helper.
    """
    with database_factory.make_server_cursor_database().session() as session:
        cursor = session.execute(site_pairing_sql)
        site_rows = [(int(pid), str(group)) for pid, group in cursor.fetchall()]
        cursor.close()
        for participant_id, google_group in site_rows:
            run_gsutil(_participant_rsync_command(destination_bucket, google_group, participant_id))

        cursor = session.execute(no_site_pairing_sql)
        unpaired_ids = [int(pid) for (pid,) in cursor.fetchall()]
        cursor.close()
        for participant_id in unpaired_ids:
            run_gsutil(_participant_rsync_command(destination_bucket, "no_site_pairing", participant_id))
if __name__ == "__main__":
    # Script entry point: set up logging, parse CLI args, and run the sync.
    configure_logging()
    parser = get_parser()
    parser.add_argument(
        "--spreadsheet_id", help="The id of the Google Spreadsheet to use. i.e. " "All-hpos-report", required=True
    )
    args = parser.parse_args()
    sync_ehr_consents(args.spreadsheet_id)
| bsd-3-clause | 6ab6a9336832a186e7095571d78c2863 | 33.006211 | 114 | 0.611689 | 3.635458 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/rdr_client/client.py | 1 | 6893 | import argparse
import copy
import http.client
import json
import logging
import pprint
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scope requested for the service-account credentials.
SCOPE = "https://www.googleapis.com/auth/userinfo.email"
# Server targeted when no --instance is supplied on the command line.
DEFAULT_INSTANCE = "https://pmi-drc-api-test.appspot.com"
_DEFAULT_BASE_PATH = "rdr/v1"
# Headers added to every POST request.
POST_HEADERS = {"Content-Type": "application/json; charset=utf-8"}
client_log = logging.getLogger(__name__)
class HttpException(BaseException):
    """Raised when an API request returns an unexpected HTTP status.

    Attributes:
        message: summary of the form "<url>:<method> - <status>" followed by
            "---" and the response content on the next line.
        code: the HTTP status code from the response.
        response: the raw httplib2 response object.
        content: the response body.

    NOTE(review): this subclasses BaseException (kept for backward
    compatibility), so plain ``except Exception`` handlers will NOT catch
    it -- confirm that is intended.
    """

    def __init__(self, url, method, response, content):
        message = "%s:%s - %s\n---%s" % (url, method, response.status, content)
        # Pass only the message to BaseException.  The previous code also
        # passed ``self`` as the first argument, which put the exception
        # object itself into ``args`` and corrupted str()/repr().
        super(HttpException, self).__init__(message)
        self.message = message
        self.code = response.status
        self.response = response
        self.content = content
class Client(object):
    """Encapsulation for making authenticated API JSON requests.
    Command-line arg parsing for --instance and --creds_file, and implementation for making a JSON
    request.
    """
    def __init__(
        self, base_path=_DEFAULT_BASE_PATH, parse_cli=True, creds_file=None, default_instance=None, parser=None
    ):
        """Set up the target instance, credentials, and authorized HTTP object.

        :param base_path: URL path prefix for API calls ("offline" switches the
            instance to the offline service when --project is given).
        :param parse_cli: if True, read --instance/--project/--creds_file from
            the command line.
        :param creds_file: path to a service-account JSON key file; required
            unless the instance URL contains "localhost".
        :param default_instance: server URL used when none is supplied.
        :param parser: optional pre-configured argparse.ArgumentParser.
        """
        default_instance = default_instance or DEFAULT_INSTANCE
        parser = parser or argparse.ArgumentParser()
        if parse_cli:
            self.args = self._parse_args(default_instance, parser)
            if base_path == "offline" and self.args.project:
                # Adjust the instance to be https://offline-dot-<PROJECT>.appspot.com
                # for offline requests
                self.instance = "https://offline-dot-%s" % self.args.instance[8:]
            else:
                self.instance = self.args.instance
            if self.args.creds_file:
                creds_file = self.args.creds_file
        else:
            self.instance = default_instance
        self.base_path = base_path
        if not creds_file and "localhost" not in self.instance:
            raise ValueError("Client requires credentials for non-local instance %r." % self.instance)
        self.creds_file = creds_file
        self._http = self._get_authorized_http()
        # etag from the most recent response, if the server sent one.
        self.last_etag = None
    def _parse_args(self, default_instance, parser):
        """Add the client's standard CLI flags to *parser* and parse argv."""
        parser.add_argument(
            "--instance",
            help="The instance to hit, either https://xxx.appspot.com, " "or http://localhost:8080",
            default=default_instance,
        )
        parser.add_argument("--project", help="GCP project name associated with --instance.")
        parser.add_argument(
            "--creds_file", help="Path to a credentials file to use when talking to the server.", required=False
        )
        return parser.parse_args()
    def _get_authorized_http(self):
        """Return an httplib2.Http, authorized with service-account creds when available."""
        if self.creds_file:
            credentials = ServiceAccountCredentials.from_json_keyfile_name(self.creds_file, [SCOPE])
            return credentials.authorize(httplib2.Http())
        else:
            return httplib2.Http()
    def request(
        self,
        path,
        method="GET",
        body=None,
        query_args=None,
        headers=None,
        cron=False,
        absolute_path=False,
        check_status=True,
        authenticated=True,
        pretend_date=None,
    ):
        """Sends an API request and returns a (response object, response content) tuple.
        Args:
          path: Relative URL path (such as "Participant/123"), unless absolute_path=True.
          pretend_date: A datetime, used by the server (if nonprod requests are allowed) for creation
              timestamps etc.

        Raises HttpException when check_status is True and the response status
        is not 200 OK.
        """
        if absolute_path:
            url = path
        else:
            url = "{}/{}/{}".format(self.instance, self.base_path, path)
        if query_args:
            # NOTE(review): values are not URL-encoded here; callers must pass
            # URL-safe query argument values.
            args_str = "&".join("{}={}".format(k, v) for k, v in query_args.items())
            url = "{}?{}".format(url, args_str)
        headers = copy.deepcopy(headers or {})
        if method == "POST":
            headers.update(POST_HEADERS)
        if cron:
            # Provide the header the dev_appserver uses for cron calls.
            headers["X-Appengine-Cron"] = "true"
        if pretend_date is not None:
            headers["x-pretend-date"] = pretend_date.isoformat()
        if authenticated:
            resp, content = self._http.request(url, method, headers=headers, body=body)
        else:
            # On dev_appserver, there is no way to tell if a request is authenticated or not.
            # This adds a header that we can use to reject 'unauthenticated' requests. What this
            # is really testing is that the auth_required annotation is in all the right places.
            headers["unauthenticated"] = "Yes"
            http_ = httplib2.Http()
            http_.force_exception_to_status_code = True
            # httplib2 requires an attempt at authentication, else returns MalformedHeader error.
            http_.add_credentials("no", "pw")
            resp, content = http_.request(url, method, headers=headers, body=body)
        client_log.info("%s for %s to %s", resp.status, method, url)
        # Log full request/response details at WARNING for failures, DEBUG otherwise.
        details_level = logging.WARNING if (check_status and resp.status != http.client.OK) else logging.DEBUG
        if client_log.isEnabledFor(details_level):
            try:
                formatted_content = pprint.pformat(json.loads(content))
            except ValueError:
                formatted_content = content
            client_log.log(
                details_level, "Response headers: %s\nResponse content: %s", pprint.pformat(resp), formatted_content
            )
        if resp.status == http.client.UNAUTHORIZED:
            # NOTE(review): the two literal fragments concatenate without a
            # space ("...try" + "tools/..."); cosmetic only.
            client_log.warning(
                "Unauthorized. If you expect this request to be allowed, try"
                "tools/install_config.sh --config config/config_dev.json --update"
            )
        if check_status and resp.status != http.client.OK:
            raise HttpException(url, method, resp, content)
        if resp.get("etag"):
            self.last_etag = resp["etag"]
        return resp, content
    def request_json(
        self,
        path,
        method="GET",
        body=None,
        query_args=None,
        headers=None,
        cron=False,
        absolute_path=False,
        pretend_date=None,
        check_status=True,
    ):
        """Send a request with a JSON-serialized body and return the decoded JSON response.

        *body* is serialized with json.dumps; a POST with no body sends "{}".
        Re-raises ValueError (after logging) if the response is not valid JSON.
        """
        json_body = None
        if body:
            json_body = json.dumps(body)
        elif method == "POST":
            json_body = "{}"
        _, content = self.request(
            path,
            method,
            body=json_body,
            query_args=query_args,
            headers=headers,
            cron=cron,
            absolute_path=absolute_path,
            pretend_date=pretend_date,
            check_status=check_status,
        )
        try:
            return json.loads(content)
        except ValueError:
            logging.error("Error decoding response content:\n%r", content)
            raise
| bsd-3-clause | 9c1c1a1fa0d3ddc2e8405fb727c4cebc | 36.259459 | 116 | 0.593065 | 4.195374 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/c68d427d67b3_adding_vcf_tbi_files_to_genomic_gc_.py | 1 | 1376 | """adding vcf tbi files to genomic gc metrics validation
Revision ID: c68d427d67b3
Revises: f80b79cff59e
Create Date: 2020-08-12 15:51:23.285273
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c68d427d67b3'
# Single parent revision this migration builds on.
down_revision = 'f80b79cff59e'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, e.g. ``upgrade_rdr``."""
    handler = globals()["upgrade_" + engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. ``downgrade_rdr``."""
    handler = globals()["downgrade_" + engine_name]
    handler()
def upgrade_rdr():
    """Add vcf_tbi_path and vcf_tbi_received to genomic_gc_validation_metrics."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('genomic_gc_validation_metrics', sa.Column('vcf_tbi_path', sa.String(length=255), nullable=True))
    op.add_column('genomic_gc_validation_metrics', sa.Column('vcf_tbi_received', sa.SmallInteger(), nullable=False))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the vcf_tbi columns added by upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('genomic_gc_validation_metrics', 'vcf_tbi_received')
    op.drop_column('genomic_gc_validation_metrics', 'vcf_tbi_path')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| bsd-3-clause | b2632888d23180a3dbab7cabe3d0dc37 | 26.52 | 116 | 0.680233 | 3.331719 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/procedure_tests.py | 1 | 17117 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import procedure
from .fhirdate import FHIRDate
class ProcedureTests(unittest.TestCase):
    """Round-trip tests for the generated FHIR Procedure model.

    Each testProcedureN loads a sample JSON fixture, checks its fields via the
    matching implProcedureN helper, serializes it back with as_json(),
    re-parses the result, and checks the same fields again to verify the
    parse -> serialize -> parse round trip is lossless for the tested fields.
    """
    def instantiate_from(self, filename):
        # Fixtures are read from FHIR_UNITTEST_DATADIR, or the current working
        # directory when that environment variable is unset/empty.
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
        self.assertEqual("Procedure", js["resourceType"])
        return procedure.Procedure(js)
    def testProcedure1(self):
        inst = self.instantiate_from("procedure-example-ambulation.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure1(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure1(inst2)
    def implProcedure1(self, inst):
        self.assertEqual(inst.code.coding[0].code, "62013009")
        self.assertEqual(inst.code.coding[0].display, "Ambulating patient (procedure)")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.text, "Ambulation")
        self.assertEqual(inst.id, "ambulation")
        self.assertEqual(inst.identifier[0].value, "12345")
        self.assertTrue(inst.notDone)
        self.assertEqual(inst.notDoneReason.coding[0].code, "398254007")
        self.assertEqual(inst.notDoneReason.coding[0].display, " Pre-eclampsia (disorder)")
        self.assertEqual(inst.notDoneReason.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.notDoneReason.text, "Pre-eclampsia")
        self.assertEqual(inst.status, "suspended")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Ambulation procedure was not done</div>")
        self.assertEqual(inst.text.status, "generated")
    def testProcedure2(self):
        inst = self.instantiate_from("procedure-example-appendectomy-narrative.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure2(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure2(inst2)
    def implProcedure2(self, inst):
        self.assertEqual(inst.id, "appendectomy-narrative")
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Routine Appendectomy in April 2013 performed by Dr Cecil Surgeon</div>")
        self.assertEqual(inst.text.status, "additional")
    def testProcedure3(self):
        inst = self.instantiate_from("procedure-example-biopsy.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure3(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure3(inst2)
    def implProcedure3(self, inst):
        self.assertEqual(inst.bodySite[0].coding[0].code, "368225008")
        self.assertEqual(inst.bodySite[0].coding[0].display, "Entire Left Forearm")
        self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.bodySite[0].text, "Left forearm")
        self.assertEqual(inst.category.coding[0].code, "103693007")
        self.assertEqual(inst.category.coding[0].display, "Diagnostic procedure (procedure)")
        self.assertEqual(inst.category.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.category.text, "Diagnostic procedure")
        self.assertEqual(inst.code.coding[0].code, "90105005")
        self.assertEqual(inst.code.coding[0].display, "Biopsy of soft tissue of forearm (Procedure)")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.text, "Biopsy of suspected melanoma L) arm")
        self.assertEqual(inst.complication[0].coding[0].code, "67750007")
        self.assertEqual(inst.complication[0].coding[0].display, "Ineffective airway clearance (finding)")
        self.assertEqual(inst.complication[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.complication[0].text, "Ineffective airway clearance")
        self.assertEqual(inst.followUp[0].text, "Review in clinic")
        self.assertEqual(inst.id, "biopsy")
        self.assertEqual(inst.note[0].text, "Standard Biopsy")
        self.assertEqual(inst.performedDateTime.date, FHIRDate("2014-02-03").date)
        self.assertEqual(inst.performedDateTime.as_json(), "2014-02-03")
        self.assertEqual(inst.reasonCode[0].text, "Dark lesion l) forearm. getting darker last 3 months.")
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Biopsy of suspected melanoma L) arm</div>")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.usedCode[0].coding[0].code, "79068005")
        self.assertEqual(inst.usedCode[0].coding[0].display, "Needle, device (physical object)")
        self.assertEqual(inst.usedCode[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.usedCode[0].text, "30-guage needle")
    def testProcedure4(self):
        inst = self.instantiate_from("procedure-example-colon-biopsy.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure4(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure4(inst2)
    def implProcedure4(self, inst):
        self.assertEqual(inst.code.coding[0].code, "76164006")
        self.assertEqual(inst.code.coding[0].display, "Biopsy of colon (procedure)")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.text, "Biopsy of colon")
        self.assertEqual(inst.id, "colon-biopsy")
        self.assertEqual(inst.identifier[0].value, "12345")
        self.assertFalse(inst.notDone)
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Biopsy of colon, which was part of colonoscopy</div>")
        self.assertEqual(inst.text.status, "generated")
    def testProcedure5(self):
        inst = self.instantiate_from("procedure-example-colonoscopy.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure5(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure5(inst2)
    def implProcedure5(self, inst):
        self.assertEqual(inst.code.coding[0].code, "73761001")
        self.assertEqual(inst.code.coding[0].display, "Colonoscopy (procedure)")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.text, "Colonoscopy")
        self.assertEqual(inst.id, "colonoscopy")
        self.assertEqual(inst.identifier[0].value, "12345")
        self.assertFalse(inst.notDone)
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Colonoscopy with complication</div>")
        self.assertEqual(inst.text.status, "generated")
    def testProcedure6(self):
        inst = self.instantiate_from("procedure-example-education.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure6(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure6(inst2)
    def implProcedure6(self, inst):
        self.assertEqual(inst.category.coding[0].code, "311401005")
        self.assertEqual(inst.category.coding[0].display, "Patient education (procedure)")
        self.assertEqual(inst.category.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.category.text, "Education")
        self.assertEqual(inst.code.coding[0].code, "48023004")
        self.assertEqual(inst.code.coding[0].display, "Breast self-examination technique education (procedure)")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.text, "Health education - breast examination")
        self.assertEqual(inst.id, "education")
        self.assertEqual(inst.performedDateTime.date, FHIRDate("2014-08-16").date)
        self.assertEqual(inst.performedDateTime.as_json(), "2014-08-16")
        self.assertEqual(inst.reasonCode[0].text, "early detection of breast mass")
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Health education - breast examination for early detection of breast mass</div>")
        self.assertEqual(inst.text.status, "generated")
    def testProcedure7(self):
        inst = self.instantiate_from("procedure-example-f001-heart.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure7(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure7(inst2)
    def implProcedure7(self, inst):
        self.assertEqual(inst.bodySite[0].coding[0].code, "17401000")
        self.assertEqual(inst.bodySite[0].coding[0].display, "Heart valve structure")
        self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.coding[0].code, "34068001")
        self.assertEqual(inst.code.coding[0].display, "Heart valve replacement")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.followUp[0].text, "described in care plan")
        self.assertEqual(inst.id, "f001")
        self.assertEqual(inst.outcome.text, "improved blood circulation")
        self.assertEqual(inst.performedPeriod.end.date, FHIRDate("2011-06-27").date)
        self.assertEqual(inst.performedPeriod.end.as_json(), "2011-06-27")
        self.assertEqual(inst.performedPeriod.start.date, FHIRDate("2011-06-26").date)
        self.assertEqual(inst.performedPeriod.start.as_json(), "2011-06-26")
        self.assertEqual(inst.performer[0].role.coding[0].code, "01.000")
        self.assertEqual(inst.performer[0].role.coding[0].display, "Arts")
        self.assertEqual(inst.performer[0].role.coding[0].system, "urn:oid:2.16.840.1.113883.2.4.15.111")
        self.assertEqual(inst.performer[0].role.text, "Care role")
        self.assertEqual(inst.reasonCode[0].text, "Heart valve disorder")
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.status, "generated")
    def testProcedure8(self):
        inst = self.instantiate_from("procedure-example-f002-lung.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure8(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure8(inst2)
    def implProcedure8(self, inst):
        self.assertEqual(inst.bodySite[0].coding[0].code, "39607008")
        self.assertEqual(inst.bodySite[0].coding[0].display, "Lung structure")
        self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.coding[0].code, "359615001")
        self.assertEqual(inst.code.coding[0].display, "Partial lobectomy of lung")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.followUp[0].text, "described in care plan")
        self.assertEqual(inst.id, "f002")
        self.assertEqual(inst.outcome.text, "improved blood circulation")
        self.assertEqual(inst.performedPeriod.end.date, FHIRDate("2013-03-08T09:30:10+01:00").date)
        self.assertEqual(inst.performedPeriod.end.as_json(), "2013-03-08T09:30:10+01:00")
        self.assertEqual(inst.performedPeriod.start.date, FHIRDate("2013-03-08T09:00:10+01:00").date)
        self.assertEqual(inst.performedPeriod.start.as_json(), "2013-03-08T09:00:10+01:00")
        self.assertEqual(inst.performer[0].role.coding[0].code, "01.000")
        self.assertEqual(inst.performer[0].role.coding[0].display, "Arts")
        self.assertEqual(inst.performer[0].role.coding[0].system, "urn:oid:2.16.840.1.113883.2.4.15.111")
        self.assertEqual(inst.performer[0].role.text, "Care role")
        self.assertEqual(inst.reasonCode[0].text, "Malignant tumor of lung")
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.status, "generated")
    def testProcedure9(self):
        inst = self.instantiate_from("procedure-example-f003-abscess.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure9(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure9(inst2)
    def implProcedure9(self, inst):
        self.assertEqual(inst.bodySite[0].coding[0].code, "83030008")
        self.assertEqual(inst.bodySite[0].coding[0].display, "Retropharyngeal area")
        self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.coding[0].code, "172960003")
        self.assertEqual(inst.code.coding[0].display, "Incision of retropharyngeal abscess")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.followUp[0].text, "described in care plan")
        self.assertEqual(inst.id, "f003")
        self.assertEqual(inst.outcome.text, "removal of the retropharyngeal abscess")
        self.assertEqual(inst.performedPeriod.end.date, FHIRDate("2013-03-24T10:30:10+01:00").date)
        self.assertEqual(inst.performedPeriod.end.as_json(), "2013-03-24T10:30:10+01:00")
        self.assertEqual(inst.performedPeriod.start.date, FHIRDate("2013-03-24T09:30:10+01:00").date)
        self.assertEqual(inst.performedPeriod.start.as_json(), "2013-03-24T09:30:10+01:00")
        self.assertEqual(inst.performer[0].role.coding[0].code, "01.000")
        self.assertEqual(inst.performer[0].role.coding[0].display, "Arts")
        self.assertEqual(inst.performer[0].role.coding[0].system, "urn:oid:2.16.840.1.113883.2.4.15.111")
        self.assertEqual(inst.performer[0].role.text, "Care role")
        self.assertEqual(inst.reasonCode[0].text, "abcess in retropharyngeal area")
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.status, "generated")
    def testProcedure10(self):
        inst = self.instantiate_from("procedure-example-f004-tracheotomy.json")
        self.assertIsNotNone(inst, "Must have instantiated a Procedure instance")
        self.implProcedure10(inst)
        js = inst.as_json()
        self.assertEqual("Procedure", js["resourceType"])
        inst2 = procedure.Procedure(js)
        self.implProcedure10(inst2)
    def implProcedure10(self, inst):
        self.assertEqual(inst.bodySite[0].coding[0].code, "83030008")
        self.assertEqual(inst.bodySite[0].coding[0].display, "Retropharyngeal area")
        self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.code.coding[0].code, "48387007")
        self.assertEqual(inst.code.coding[0].display, "Tracheotomy")
        self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.followUp[0].text, "described in care plan")
        self.assertEqual(inst.id, "f004")
        self.assertEqual(inst.outcome.text, "removal of the retropharyngeal abscess")
        self.assertEqual(inst.performedPeriod.end.date, FHIRDate("2013-03-22T10:30:10+01:00").date)
        self.assertEqual(inst.performedPeriod.end.as_json(), "2013-03-22T10:30:10+01:00")
        self.assertEqual(inst.performedPeriod.start.date, FHIRDate("2013-03-22T09:30:10+01:00").date)
        self.assertEqual(inst.performedPeriod.start.as_json(), "2013-03-22T09:30:10+01:00")
        self.assertEqual(inst.performer[0].role.coding[0].code, "01.000")
        self.assertEqual(inst.performer[0].role.coding[0].display, "Arts")
        self.assertEqual(inst.performer[0].role.coding[0].system, "urn:oid:2.16.840.1.113883.2.4.15.111")
        self.assertEqual(inst.performer[0].role.text, "Care role")
        self.assertEqual(inst.reasonCode[0].text, "ensure breathing during surgery")
        self.assertEqual(inst.status, "completed")
        self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | d2b7178e548766b50b7df2afe937f0ee | 54.937908 | 165 | 0.683473 | 3.329508 | false | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/measurereport.py | 1 | 14814 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MeasureReport) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class MeasureReport(domainresource.DomainResource):
    """ Results of a measure evaluation.
    The MeasureReport resource contains the results of the calculation of a
    measure; and optionally a reference to the resources involved in that
    calculation.
    """
    resource_type = "MeasureReport"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.date = None
        """ When the report was generated.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.evaluatedResource = None
        """ What data was used to calculate the measure score.
        List of `FHIRReference` items (represented as `dict` in JSON). """
        self.group = None
        """ Measure results for each group.
        List of `MeasureReportGroup` items (represented as `dict` in JSON). """
        self.identifier = None
        """ Additional identifier for the MeasureReport.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.improvementNotation = None
        """ increase | decrease.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.measure = None
        """ What measure was calculated.
        Type `str`. """
        self.period = None
        """ What period the report covers.
        Type `Period` (represented as `dict` in JSON). """
        self.reporter = None
        """ Who is reporting the data.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.status = None
        """ complete | pending | error.
        Type `str`. """
        self.subject = None
        """ What individual(s) the report is for.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.type = None
        """ individual | subject-list | summary | data-collection.
        Type `str`. """
        super(MeasureReport, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(MeasureReport, self).elementProperties()
        # Each tuple is (name, json_name, type, is_list, "of_many" group, required).
        js.extend([
            ("date", "date", fhirdate.FHIRDate, False, None, False),
            ("evaluatedResource", "evaluatedResource", fhirreference.FHIRReference, True, None, False),
            ("group", "group", MeasureReportGroup, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("improvementNotation", "improvementNotation", codeableconcept.CodeableConcept, False, None, False),
            ("measure", "measure", str, False, None, True),
            ("period", "period", period.Period, False, None, True),
            ("reporter", "reporter", fhirreference.FHIRReference, False, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, False),
            ("type", "type", str, False, None, True),
        ])
        return js
from . import backboneelement
# Auto-generated FHIR backbone element; regenerate rather than hand-edit.
class MeasureReportGroup(backboneelement.BackboneElement):
    """ Measure results for each group.

    The results of the calculation, one for each population group in the
    measure.
    """

    resource_type = "MeasureReportGroup"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ Meaning of the group.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.measureScore = None
        """ What score this group achieved.
        Type `Quantity` (represented as `dict` in JSON). """

        self.population = None
        """ The populations in the group.
        List of `MeasureReportGroupPopulation` items (represented as `dict` in JSON). """

        self.stratifier = None
        """ Stratification results.
        List of `MeasureReportGroupStratifier` items (represented as `dict` in JSON). """

        super(MeasureReportGroup, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MeasureReportGroup, self).elementProperties()
        # (name, json_name, type, is_list, of_many, required) -- see note on MeasureReport.
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("measureScore", "measureScore", quantity.Quantity, False, None, False),
            ("population", "population", MeasureReportGroupPopulation, True, None, False),
            ("stratifier", "stratifier", MeasureReportGroupStratifier, True, None, False),
        ])
        return js
# Auto-generated FHIR backbone element; regenerate rather than hand-edit.
class MeasureReportGroupPopulation(backboneelement.BackboneElement):
    """ The populations in the group.

    The populations that make up the population group, one for each type of
    population appropriate for the measure.
    """

    resource_type = "MeasureReportGroupPopulation"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ initial-population | numerator | numerator-exclusion | denominator
        | denominator-exclusion | denominator-exception | measure-
        population | measure-population-exclusion | measure-observation.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.count = None
        """ Size of the population.
        Type `int`. """

        self.subjectResults = None
        """ For subject-list reports, the subject results in this population.
        Type `FHIRReference` (represented as `dict` in JSON). """

        super(MeasureReportGroupPopulation, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MeasureReportGroupPopulation, self).elementProperties()
        # (name, json_name, type, is_list, of_many, required) -- see note on MeasureReport.
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("count", "count", int, False, None, False),
            ("subjectResults", "subjectResults", fhirreference.FHIRReference, False, None, False),
        ])
        return js
# Auto-generated FHIR backbone element; regenerate rather than hand-edit.
class MeasureReportGroupStratifier(backboneelement.BackboneElement):
    """ Stratification results.

    When a measure includes multiple stratifiers, there will be a stratifier
    group for each stratifier defined by the measure.
    """

    resource_type = "MeasureReportGroupStratifier"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ What stratifier of the group.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.stratum = None
        """ Stratum results, one for each unique value, or set of values, in
        the stratifier, or stratifier components.
        List of `MeasureReportGroupStratifierStratum` items (represented as `dict` in JSON). """

        super(MeasureReportGroupStratifier, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MeasureReportGroupStratifier, self).elementProperties()
        # (name, json_name, type, is_list, of_many, required) -- see note on MeasureReport.
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, True, None, False),
            ("stratum", "stratum", MeasureReportGroupStratifierStratum, True, None, False),
        ])
        return js
# Auto-generated FHIR backbone element; regenerate rather than hand-edit.
class MeasureReportGroupStratifierStratum(backboneelement.BackboneElement):
    """ Stratum results, one for each unique value, or set of values, in the
    stratifier, or stratifier components.

    This element contains the results for a single stratum within the
    stratifier. For example, when stratifying on administrative gender, there
    will be four strata, one for each possible gender value.
    """

    resource_type = "MeasureReportGroupStratifierStratum"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.component = None
        """ Stratifier component values.
        List of `MeasureReportGroupStratifierStratumComponent` items (represented as `dict` in JSON). """

        self.measureScore = None
        """ What score this stratum achieved.
        Type `Quantity` (represented as `dict` in JSON). """

        self.population = None
        """ Population results in this stratum.
        List of `MeasureReportGroupStratifierStratumPopulation` items (represented as `dict` in JSON). """

        self.value = None
        """ The stratum value, e.g. male.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(MeasureReportGroupStratifierStratum, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MeasureReportGroupStratifierStratum, self).elementProperties()
        # (name, json_name, type, is_list, of_many, required) -- see note on MeasureReport.
        js.extend([
            ("component", "component", MeasureReportGroupStratifierStratumComponent, True, None, False),
            ("measureScore", "measureScore", quantity.Quantity, False, None, False),
            ("population", "population", MeasureReportGroupStratifierStratumPopulation, True, None, False),
            ("value", "value", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
# Auto-generated FHIR backbone element; regenerate rather than hand-edit.
class MeasureReportGroupStratifierStratumComponent(backboneelement.BackboneElement):
    """ Stratifier component values.

    A stratifier component value.
    """

    resource_type = "MeasureReportGroupStratifierStratumComponent"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ What stratifier component of the group.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.value = None
        """ The stratum component value, e.g. male.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(MeasureReportGroupStratifierStratumComponent, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MeasureReportGroupStratifierStratumComponent, self).elementProperties()
        # Both fields are flagged required (final True) for this element.
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, True),
            ("value", "value", codeableconcept.CodeableConcept, False, None, True),
        ])
        return js
# Auto-generated FHIR backbone element; regenerate rather than hand-edit.
class MeasureReportGroupStratifierStratumPopulation(backboneelement.BackboneElement):
    """ Population results in this stratum.

    The populations that make up the stratum, one for each type of population
    appropriate to the measure.
    """

    resource_type = "MeasureReportGroupStratifierStratumPopulation"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ initial-population | numerator | numerator-exclusion | denominator
        | denominator-exclusion | denominator-exception | measure-
        population | measure-population-exclusion | measure-observation.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.count = None
        """ Size of the population.
        Type `int`. """

        self.subjectResults = None
        """ For subject-list reports, the subject results in this population.
        Type `FHIRReference` (represented as `dict` in JSON). """

        super(MeasureReportGroupStratifierStratumPopulation, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(MeasureReportGroupStratifierStratumPopulation, self).elementProperties()
        # (name, json_name, type, is_list, of_many, required) -- see note on MeasureReport.
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("count", "count", int, False, None, False),
            ("subjectResults", "subjectResults", fhirreference.FHIRReference, False, None, False),
        ])
        return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| bsd-3-clause | d2e4d92738216e67c1f0580c67eb14a9 | 39.037838 | 112 | 0.638248 | 4.515087 | false | false | false | false |
all-of-us/raw-data-repository | tests/client_tests/test_requests.py | 1 | 1132 | import http.client
import unittest
from tests.client_tests.base import BaseClientTest
class RequestsTest(BaseClientTest):
    """Tests basic mechanics of requests: authorization and headers."""

    def test_unauthenticated(self):
        # An unauthenticated POST must be rejected with 401.
        response, _ = self.client.request(
            "Participant", method="POST", body="{}", authenticated=False, check_status=False
        )
        self.assertEqual(response.status, http.client.UNAUTHORIZED)

    def test_header_values(self):
        # Every successful response must carry the headers below verbatim.
        response, _ = self.client.request("Participant", method="POST", body="{}")
        expected_headers = (
            ("content-disposition", 'attachment; filename="f.txt"'),
            ("content-type", "application/json; charset=utf-8"),
            ("x-content-type-options", "nosniff"),
        )
        for header_name, expected_value in expected_headers:
            actual_value = response.get(header_name)
            self.assertEqual(
                actual_value,
                expected_value,
                "Response header %r was set to %r, expected %r."
                % (header_name, actual_value, expected_value),
            )
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause | 0e10518c274c5a7ff3e78963613b5d27 | 34.375 | 92 | 0.606007 | 4.223881 | false | true | false | false |
all-of-us/raw-data-repository | tests/dao_tests/test_bigquery_sync_dao.py | 1 | 25805 | from datetime import datetime, timedelta
import json
from rdr_service.clock import FakeClock
from rdr_service import clock
from rdr_service.code_constants import *
from rdr_service.dao.biobank_order_dao import BiobankOrderDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.physical_measurements_dao import PhysicalMeasurementsDao
from rdr_service.model.biobank_order import BiobankOrder, BiobankOrderIdentifier, BiobankOrderedSample
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.hpo import HPO
from rdr_service.model.measurements import PhysicalMeasurements
from rdr_service.model.site import Site
from rdr_service.participant_enums import WithdrawalAIANCeremonyStatus
from tests.test_data import load_measurement_json
from tests.helpers.unittest_base import BaseTestCase, PDRGeneratorTestMixin
class BigQuerySyncDaoTest(BaseTestCase, PDRGeneratorTestMixin):
# Fixed timestamps used throughout these tests; FakeClock pins "now" to them.
TIME_1 = datetime(2018, 9, 20, 5, 49, 11)
TIME_2 = datetime(2018, 9, 24, 14, 21, 1)
TIME_3 = datetime(2018, 9, 25, 12, 25, 30)
# Shared fixtures, populated by setUp() and the _set_up_participant_data helper.
site = None
hpo = None
summary = None
pm_json = None
pm = None
bio_order = None
# Cached questionnaire ids so each questionnaire is only created once per test case.
qn_thebasics_id = None
qn_ehrconsent_id = None
qn_dvehrconsent_id = None
qn_lifestyle_id = None
qn_overall_health_id = None
qn_gror_id = None
def setUp(self):
    """Create the base test participant, paired to the PITT HPO / monroeville site."""
    super(BigQuerySyncDaoTest, self).setUp(with_consent_codes=True)
    self.dao = ParticipantDao()
    with self.dao.session() as session:
        self.hpo = session.query(HPO).filter(HPO.name == 'PITT').first()
        self.site = session.query(Site).filter(Site.googleGroup == 'hpo-site-monroeville').first()
    self.provider_link = {
        "primary": True, "organization": {"display": None, "reference": "Organization/PITT"}}
    with clock.FakeClock(self.TIME_1):
        participant = self.create_participant(self.provider_link)
    self.participant = participant
    # Strip the API id prefixes ("P"/"Z") to get the raw integer ids.
    self.participant_id = int(participant['participantId'].replace('P', ''))
    self.biobank_id = int(participant['biobankId'].replace('Z', ''))
def create_participant(self, provider_link=None):
    """POST a new Participant, optionally paired via *provider_link*, and return the response JSON."""
    payload = {"providerLink": [provider_link]} if provider_link else {}
    return self.send_post("Participant", payload)
def _submit_ehrconsent(self, participant_id, response_code=CONSENT_PERMISSION_YES_CODE, response_time=None):
    """Submit an EHR consent questionnaire response with the given answer code."""
    if not self.qn_ehrconsent_id:
        # Lazily create (and cache) the questionnaire on first use.
        self.qn_ehrconsent_id = self.create_questionnaire("ehr_consent_questionnaire.json")
    answers = [self.make_code_answer('ehrConsent', response_code)]
    qr = self.make_questionnaire_response_json(participant_id, self.qn_ehrconsent_id,
                                               code_answers=answers)
    with FakeClock(response_time or self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)
def _submit_sensitive_ehr(self, participant_id, response_data=None, response_time=None):
    """Submit an EHR consent questionnaire response built from *response_data*.

    :param response_data: mapping of question code -> answer code; None means no answers.

    Fix: the default was previously a mutable ``{}`` literal. Default values are
    evaluated once and shared across calls in Python, so a mutable default is a
    classic aliasing hazard; a ``None`` sentinel avoids it.
    """
    if not self.qn_ehrconsent_id:
        self.qn_ehrconsent_id = self.create_questionnaire("ehr_consent_questionnaire.json")
    code_answers = [self.make_code_answer(key, value)
                    for key, value in (response_data or {}).items()]
    qr = self.make_questionnaire_response_json(participant_id, self.qn_ehrconsent_id,
                                               code_answers=code_answers)
    with FakeClock(response_time or self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)
def _submit_ehrconsent_expired(self, participant_id, response_code=CONSENT_PERMISSION_NO_CODE, response_time=None):
    """Submit an EHR consent response that carries the hidden 'consent expired' answer."""
    if not self.qn_ehrconsent_id:
        self.qn_ehrconsent_id = self.create_questionnaire("ehr_consent_questionnaire.json")
    authored = response_time if response_time else self.TIME_1
    qr_json = self.make_questionnaire_response_json(
        participant_id,
        self.qn_ehrconsent_id,
        string_answers=[['ehrConsentExpired', 'EHRConsentPII_ConsentExpired_Yes']],
        code_answers=[self.make_code_answer('ehrConsent', response_code)],
        authored=authored
    )
    with FakeClock(response_time or self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr_json)
def _submit_dvehrconsent(self, participant_id, response_code=DVEHRSHARING_CONSENT_CODE_YES, response_time=None):
    """Submit the DV EHR sharing consent questionnaire for *participant_id*.

    Fix: the response JSON was previously built with ``self.participant_id``
    regardless of the ``participant_id`` argument, so a response submitted for
    any other participant (as sibling helpers allow) would be attributed to the
    wrong participant. Existing callers pass ``self.participant_id``, so this
    change is backward compatible.
    """
    if not self.qn_dvehrconsent_id:
        self.qn_dvehrconsent_id = self.create_questionnaire("dv_ehr_share_consent_questionnaire.json")
    code_answers = [self.make_code_answer(DVEHR_SHARING_QUESTION_CODE, response_code)]
    qr = self.make_questionnaire_response_json(participant_id, self.qn_dvehrconsent_id,
                                               code_answers=code_answers)
    with FakeClock(response_time or self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)
def _submit_thebasics(self, participant_id):
    """Submit a minimal TheBasics questionnaire response for *participant_id*.

    Fix: the response JSON was previously built with ``self.participant_id``
    regardless of the ``participant_id`` argument (only the POST URL used the
    argument). Existing callers pass ``self.participant_id``, so this change is
    backward compatible.
    """
    if not self.qn_thebasics_id:
        self.qn_thebasics_id = self.create_questionnaire("questionnaire3.json")
    string_answers = [('firstName', 'John'), ('lastName', 'Doe')]
    qr = self.make_questionnaire_response_json(participant_id, self.qn_thebasics_id,
                                               string_answers=string_answers)
    with FakeClock(self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)
def _submit_lifestyle(self, participant_id):
    """Submit a minimal LifeStyle questionnaire response for *participant_id*.

    Fix: the response JSON was previously built with ``self.participant_id``
    regardless of the ``participant_id`` argument (only the POST URL used the
    argument). Existing callers pass ``self.participant_id``, so this change is
    backward compatible.
    """
    if not self.qn_lifestyle_id:
        self.qn_lifestyle_id = self.create_questionnaire("questionnaire4.json")
    code_answers = [self.make_code_answer('state', UNSET)]
    qr = self.make_questionnaire_response_json(participant_id, self.qn_lifestyle_id,
                                               code_answers=code_answers)
    with FakeClock(self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)
def _submit_overall_health(self, participant_id):
    """Submit a minimal OverallHealth questionnaire response for *participant_id*.

    Fix: the response JSON was previously built with ``self.participant_id``
    regardless of the ``participant_id`` argument (only the POST URL used the
    argument). Existing callers pass ``self.participant_id``, so this change is
    backward compatible.
    """
    if not self.qn_overall_health_id:
        self.qn_overall_health_id = self.create_questionnaire("questionnaire_overall_health.json")
    code_answers = [self.make_code_answer('physicalHealth', UNSET)]
    qr = self.make_questionnaire_response_json(participant_id, self.qn_overall_health_id,
                                               code_answers=code_answers)
    with FakeClock(self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)
def _submit_genomics_ror(self, participant_id, consent_response=CONSENT_GROR_YES_CODE, response_time=None):
    """Submit the Genomics ROR consent questionnaire for *participant_id*.

    Fix: the response JSON was previously built with ``self.participant_id``
    regardless of the ``participant_id`` argument (only the POST URL used the
    argument). Existing callers pass ``self.participant_id``, so this change is
    backward compatible.
    """
    if not self.qn_gror_id:
        self.qn_gror_id = self.create_questionnaire("consent_for_genomic_ror_question.json")
    code_answers = [self.make_code_answer('genomic_consent', consent_response)]
    qr = self.make_questionnaire_response_json(participant_id, self.qn_gror_id, code_answers=code_answers)
    with FakeClock(response_time or self.TIME_1):
        self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)
def _make_physical_measurements(self, **kwargs):
    """Build a PhysicalMeasurements record with valid, complete defaults.

    Keyword args pass through to the PhysicalMeasurements constructor,
    overriding the defaults below.
    """
    defaults = {
        'physicalMeasurementsId': 1,
        'participantId': self.participant_id,
        'createdSiteId': self.site.siteId,
        'finalizedSiteId': self.site.siteId,
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    record = PhysicalMeasurements(**kwargs)
    # Attach the FHIR document prepared by _set_up_participant_data.
    PhysicalMeasurementsDao.store_record_fhir_doc(record, self.pm_json)
    return record
def _make_biobank_order(self, **kwargs):
    """Build a BiobankOrder with valid, complete defaults and persist a matching
    BiobankStoredSample.

    Keyword args pass through to the BiobankOrder constructor, overriding the
    defaults below.
    """
    defaults = {
        'biobankOrderId': '1',
        'created': clock.CLOCK.now(),
        'participantId': self.participant_id,
        'sourceSiteId': 1,
        'sourceUsername': 'fred@pmi-ops.org',
        'collectedSiteId': 1,
        'collectedUsername': 'joe@pmi-ops.org',
        'processedSiteId': 1,
        'processedUsername': 'sue@pmi-ops.org',
        'finalizedSiteId': 2,
        'finalizedUsername': 'bob@pmi-ops.org',
        'identifiers': [BiobankOrderIdentifier(system='https://www.pmi-ops.org', value='123456789')],
        'samples': [BiobankOrderedSample(
            biobankOrderId='1',
            test='1ED04',
            description=u'description',
            finalized=self.TIME_1,
            processingRequired=True)],
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    biobank_order = BiobankOrder(**kwargs)
    # Persist a stored sample tied to the same test/identifier so sample status
    # logic has confirmed data to work with.
    bss = BiobankStoredSample()
    bss.biobankId = self.biobank_id
    bss.test = '1ED04'
    bss.biobankOrderIdentifier = '123456789'
    bss.confirmed = self.TIME_2
    bss.created = self.TIME_2
    bss.biobankStoredSampleId = 'I11111111'
    bss.family_id = 'F11111111'
    with self.dao.session() as session:
        session.add(bss)
    return biobank_order
def test_registered_participant_gen(self):
    """ Test a BigQuery after initial participant creation """
    # A freshly created participant should generate as REGISTERED.
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    self.assertEqual(summary['enrollment_status'], 'REGISTERED')
def test_interested_participant_gen(self):
    """ Basic Participant Creation Test"""
    self.send_consent(self.participant_id)
    ps_json = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(ps_json)
    self.assertEqual(ps_json.get('sign_up_time', None),
                     self.TIME_1.strftime("%Y-%m-%dT%H:%M:%S"))
    self.assertEqual(ps_json.get('suspension_status', None), 'NOT_SUSPENDED')
    # Fix: the arguments here were misplaced -- the original asserted the value
    # was None and passed 'NOT_WITHDRAWN' as the assertEqual failure *message*,
    # so the withdrawal status was never actually verified.
    self.assertEqual(ps_json.get('withdrawn_status', None), 'NOT_WITHDRAWN')
    self.assertEqual(ps_json.get('enrollment_status', None), 'PARTICIPANT')
def test_member_participant_status(self):
    """ Member Participant Test"""
    # Primary consent plus EHR consent should reach FULLY_CONSENTED.
    self.send_consent(self.participant_id)
    self._submit_ehrconsent(self.participant_id)
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    self.assertEqual(summary['enrollment_status'], 'FULLY_CONSENTED')
def _set_up_participant_data(self, fake_time=None, skip_ehr=False):
    """Drive the base participant through consents, the baseline modules,
    physical measurements and a biobank order -- everything needed for the
    participant summary to compute core-status timing."""
    frozen_time = fake_time or self.TIME_1
    with clock.FakeClock(frozen_time):
        self.send_consent(self.participant_id)
        if not skip_ehr:
            self._submit_ehrconsent(self.participant_id)
        self._submit_lifestyle(self.participant_id)
        self._submit_thebasics(self.participant_id)
        self._submit_overall_health(self.participant_id)
        measurement_doc = load_measurement_json(self.participant_id, self.TIME_2.isoformat())
        self.pm_json = json.dumps(measurement_doc)
        self.pm = PhysicalMeasurementsDao().insert(self._make_physical_measurements())
        self.dao = BiobankOrderDao()
        self.bio_order = BiobankOrderDao().insert(
            self._make_biobank_order(participantId=self.participant_id))
def test_full_participant_status(self):
    """ Full Participant Test"""
    self._set_up_participant_data()
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    self.assertEqual('COHORT_2', summary['consent_cohort'], 'Test is built assuming cohort 2')
    first_pm = summary['pm'][0]
    self.assertEqual(first_pm['pm_finalized_site'], 'hpo-site-monroeville')
    self.assertEqual(first_pm['pm_status'], 'COMPLETED')
    self.assertEqual(summary['enrollment_status'], 'CORE_PARTICIPANT')
def test_ehr_consent_expired_for_full_consent_participant(self):
    """An expired EHR consent downgrades FULLY_CONSENTED back to PARTICIPANT."""
    response = self.create_participant(self.provider_link)
    p_id = int(response['participantId'].replace('P', ''))
    self.send_consent(p_id, authored=self.TIME_1)
    self._submit_ehrconsent(p_id, response_time=self.TIME_1)
    summary = self.make_bq_participant_summary(p_id)
    self.assertIsNotNone(summary)
    self.assertEqual(summary['enrollment_status'], 'FULLY_CONSENTED')
    # send ehr consent expired response
    self._submit_ehrconsent_expired(p_id, response_time=self.TIME_2)
    summary = self.make_bq_participant_summary(p_id)
    self.assertIsNotNone(summary)
    # downgrade FULLY_CONSENTED to PARTICIPANT
    self.assertEqual(summary['enrollment_status'], 'PARTICIPANT')
def test_ehr_consent_expired_for_core_participant(self):
    """An expired EHR consent must NOT demote a participant who already reached core."""
    self._set_up_participant_data(fake_time=self.TIME_1)
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    self.assertEqual('COHORT_2', summary['consent_cohort'], 'Test is built assuming cohort 2')
    first_pm = summary['pm'][0]
    self.assertEqual(first_pm['pm_finalized_site'], 'hpo-site-monroeville')
    self.assertEqual(first_pm['pm_status'], 'COMPLETED')
    self.assertEqual(summary['enrollment_status'], 'CORE_PARTICIPANT')
    # send ehr consent expired response
    self._submit_ehrconsent_expired(self.participant_id, response_time=self.TIME_3)
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    # once CORE, always CORE
    self.assertEqual(summary['enrollment_status'], 'CORE_PARTICIPANT')
def test_cohort_3_without_gror(self):
    """Cohort 3 participants stop at FULLY_CONSENTED until GROR consent arrives."""
    self._set_up_participant_data(fake_time=datetime(2020, 6, 1))
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    self.assertEqual('COHORT_3', summary['consent_cohort'], 'Test is built assuming cohort 3')
    self.assertEqual('FULLY_CONSENTED', summary['enrollment_status'])
def test_cohort_3_with_gror(self):
    """Cohort 3 participants reach core once GROR consent is submitted."""
    self._set_up_participant_data(fake_time=datetime(2020, 6, 1))
    self._submit_genomics_ror(self.participant_id)
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    self.assertEqual('COHORT_3', summary['consent_cohort'], 'Test is built assuming cohort 3')
    self.assertEqual('CORE_PARTICIPANT', summary['enrollment_status'])
def test_participant_stays_core(self):
    """Revoking GROR consent must not demote a participant who already reached core."""
    self._set_up_participant_data(fake_time=datetime(2020, 5, 1))
    self._submit_genomics_ror(self.participant_id,
                              consent_response=CONSENT_GROR_YES_CODE,
                              response_time=datetime(2020, 7, 1))
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertEqual('COHORT_3', summary['consent_cohort'], 'Test is built assuming cohort 3')
    self.assertEqual('CORE_PARTICIPANT', summary['enrollment_status'],
                     'Test is built assuming participant starts as core')
    # Send an update to remove GROR consent and make sure participant is still CORE
    self._submit_genomics_ror(self.participant_id,
                              consent_response=CONSENT_GROR_NO_CODE,
                              response_time=datetime(2020, 9, 1))
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertEqual('CORE_PARTICIPANT', summary['enrollment_status'])
    # This verifies the module submitted status from the participant generator data for each of the GROR modules
    # Also checks that an external id key/value pair exists (but value likely None for test data modules)
    gror_modules = self.get_generated_items(summary['modules'], item_key='mod_module', item_value='GROR',
                                            sort_key='mod_authored')
    self.assertIn('mod_external_id', gror_modules[0])
    self.assertEqual('SUBMITTED', gror_modules[0]['mod_status'])
    self.assertEqual('SUBMITTED_NO_CONSENT', gror_modules[1]['mod_status'])
def test_previous_ehr_and_dv_ehr_reverted(self):
    # Scenario: a participant previously reached core participant status with EHR and DV EHR consent both YES
    # If EHR consent is changed to No, they should remain Core
    self._set_up_participant_data(skip_ehr=True)
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertEqual('COHORT_2', summary['consent_cohort'],
                     'Test is built assuming cohort 2 (and that GROR consent is not required for Core status')
    self.assertNotEqual('CORE_PARTICIPANT', summary['enrollment_status'],
                        'Test is built assuming participant does not initialize as Core')
    # Get Core status through EHR consents
    self._submit_ehrconsent(self.participant_id,
                            response_code=CONSENT_PERMISSION_YES_CODE,
                            response_time=datetime(2019, 2, 14))
    self._submit_dvehrconsent(self.participant_id, response_time=datetime(2019, 4, 1))
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertEqual('CORE_PARTICIPANT', summary['enrollment_status'],
                     'Test is built assuming participant achieves Core status')
    # Send an update to remove EHR consent and make sure participant is still CORE
    self._submit_ehrconsent(self.participant_id,
                            response_code=CONSENT_PERMISSION_NO_CODE,
                            response_time=datetime(2019, 7, 1))
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertEqual('CORE_PARTICIPANT', summary['enrollment_status'])
    # This verifies the module submitted status from the participant generator data for ehr modules
    # Also checks that an external id key/value pair exists (but value likely None for test data modules)
    ehr_modules = self.get_generated_items(summary['modules'], item_key='mod_module', item_value='EHRConsentPII',
                                           sort_key="mod_authored")
    self.assertIn('mod_external_id', ehr_modules[0])
    self.assertEqual('SUBMITTED', ehr_modules[0]['mod_status'])
    self.assertEqual('SUBMITTED_NO_CONSENT', ehr_modules[1]['mod_status'])
def test_no_on_ehr_overrides_yes_on_dv(self):
    # Scenario: a participant has had DV_EHR yes, but previously had a no on EHR.
    # No on EHR should supersede a yes on DV_EHR.
    self._set_up_participant_data(skip_ehr=True)
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertEqual('COHORT_2', summary['consent_cohort'],
                     'Test is built assuming cohort 2 (and that GROR consent is not required for Core status')
    self._submit_ehrconsent(self.participant_id,
                            response_code=CONSENT_PERMISSION_NO_CODE,
                            response_time=datetime(2019, 2, 14))
    self._submit_dvehrconsent(self.participant_id, response_time=datetime(2019, 4, 1))
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertEqual('PARTICIPANT', summary['enrollment_status'])
def test_ehr_consent_expired_and_renewed(self):
    """Expired-then-renewed EHR consent: the 'consent_expired' flag must only be
    set on the response that actually carried the expiration answer."""
    self._set_up_participant_data(fake_time=self.TIME_1)
    # send ehr consent expired response
    self._submit_ehrconsent_expired(self.participant_id, response_time=self.TIME_2)
    # send a new ehr consent (renewal/reconsent)
    self._submit_ehrconsent(self.participant_id,
                            response_code=CONSENT_PERMISSION_YES_CODE,
                            response_time=self.TIME_3)
    summary = self.make_bq_participant_summary(self.participant_id)
    self.assertIsNotNone(summary)
    consents = self.get_generated_items(summary['consents'], item_key='consent_module',
                                        item_value='EHRConsentPII', sort_key='consent_module_authored')
    # Confirm a total of three EHR Consent responses
    self.assertEqual(len(consents), 3)
    # Verify the initial EHR consent details (sent by _set_up_participant_data)
    initial, expired, renewed = consents
    self.assertEqual(initial.get('consent_module_authored', None),
                     self.TIME_1.strftime("%Y-%m-%dT%H:%M:%S"))
    self.assertEqual(initial.get('consent_value', None), CONSENT_PERMISSION_YES_CODE)
    # This field should be None for consent payloads that don't contain the expiration hidden question code
    self.assertIsNone(initial.get('consent_expired', ''))
    # Verify the expired consent response details (contains the hidden expiration question code / answer value)
    self.assertEqual(expired.get('consent_module_authored', None),
                     self.TIME_2.strftime("%Y-%m-%dT%H:%M:%S"))
    self.assertEqual(expired.get('consent_value', None), CONSENT_PERMISSION_NO_CODE)
    self.assertEqual(expired.get('consent_expired', None), EHR_CONSENT_EXPIRED_YES)
    # Verify the last EHR consent renewal; 'consent_expired' value should not be carried forward from last consent
    self.assertEqual(renewed.get('consent_module_authored', None),
                     self.TIME_3.strftime("%Y-%m-%dT%H:%M:%S"))
    self.assertEqual(renewed.get('consent_value', None), CONSENT_PERMISSION_YES_CODE)
    # This field should be None for consent payloads that don't contain the expiration hidden question code
    self.assertIsNone(renewed.get('consent_expired', ''))
def test_ceremony_decision_fields(self):
# Set up data for different scenarios of withdrawn participants
# Clearing microseconds to avoid rounding time up in database and causing test to fail
two_days_ago = datetime.today().replace(microsecond=0) - timedelta(days=2)
withdrawal_reason_justification = 'testing withdrawal'
no_ceremony_native_american_participant = self.data_generator.create_withdrawn_participant(
withdrawal_reason_justification=withdrawal_reason_justification,
is_native_american=True,
requests_ceremony=WithdrawalAIANCeremonyStatus.DECLINED,
withdrawal_time=two_days_ago
)
ceremony_native_american_participant = self.data_generator.create_withdrawn_participant(
withdrawal_reason_justification=withdrawal_reason_justification,
is_native_american=True,
requests_ceremony=WithdrawalAIANCeremonyStatus.REQUESTED,
withdrawal_time=two_days_ago
)
# Non-AIAN should not have been presented with a ceremony choice
non_native_american_participant = self.data_generator.create_withdrawn_participant(
withdrawal_reason_justification=withdrawal_reason_justification,
is_native_american=False,
requests_ceremony=None,
withdrawal_time=two_days_ago
)
ps_bqs_data = self.make_bq_participant_summary(no_ceremony_native_american_participant.participantId)
self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status'),
str(WithdrawalAIANCeremonyStatus.DECLINED))
self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status_id'),
int(WithdrawalAIANCeremonyStatus.DECLINED))
ps_bqs_data = self.make_bq_participant_summary(ceremony_native_american_participant.participantId)
self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status'),
str(WithdrawalAIANCeremonyStatus.REQUESTED))
self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status_id'),
int(WithdrawalAIANCeremonyStatus.REQUESTED))
ps_bqs_data = self.make_bq_participant_summary(non_native_american_participant.participantId)
self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status'),
str(WithdrawalAIANCeremonyStatus.UNSET))
self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status_id'),
int(WithdrawalAIANCeremonyStatus.UNSET))
| bsd-3-clause | 98f524ea67c4ad3a568d7900bd79858c | 51.02621 | 119 | 0.650068 | 3.637068 | false | true | false | false |
brython-dev/brython | www/cgi-bin/store_speed.py | 1 | 1886 | #!c:/python37/python.exe
# -*- coding: utf-8 -*-
import os
import cgi
import json
import sys
cpython_version = ".".join(str(x) for x in sys.implementation.version[:3])
print('Content-type: text/html\n\n')
print()
fs = cgi.FieldStorage()
results = json.loads(fs["results"].value)
version = fs["version"].value
userAgent = fs["userAgent"].value
data = [
{"test": result["test"],
"description": result["description"],
"src": result["src"].replace("\r\n", "\n"),
"ratio": round(100 * (result["Brython"] / result["CPython"]))
}
for result in results]
json.dump(data, open("speed_results.json", "w", encoding="utf-8"),
indent=4)
with open("speed_results.txt", "w", encoding="utf-8") as out:
for line in data:
out.write(f'{line["description"]};{line["ratio"]}\n')
html = """<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Brython speed compared to CPython</title>
<link rel="stylesheet" href="/brython.css">
<style>
body{
padding-left: 2em;
}
td{
vertical-align: top;
padding: 3px;
}
td, th{
border-style: solid;
border-width: 0px 0px 1px 0px;
border-color: #000;
}
pre{
margin: 0px 0px 0px 5px;
}
</style>
</head>
<body>
<h2>Brython {{version}} performance compared to CPython {{cpython_version}}</h2>
User agent: {{userAgent}}
<p>
<table>
<tr>
<th>Test</th>
<th>Brython<br>(100 = CPython)</th>
<th>Code</th>
</tr>
"""
with open("speed_results.html", "w", encoding="utf-8") as out:
head = html.replace("{{version}}", version).replace("{{userAgent}}",
userAgent).replace("{{cpython_version}}", cpython_version)
out.write(head)
for record in data:
out.write(f'<tr><td>{record["description"]}</td>' +
f'<td align="right"><b>{record["ratio"]}</b></td>' +
f'<td><pre>{record["src"]}</pre></td></tr>\n')
out.write("</table>\n</body>\n</html>") | bsd-3-clause | ebdf1ff143c20abaf17227dd5812e8bd | 23.828947 | 80 | 0.608165 | 2.946875 | false | false | false | false |
brython-dev/brython | www/src/Lib/encodings/mac_romanian.py | 35 | 13968 | """ Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-romanian codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* via the mac-romanian encoding table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode mac-romanian bytes via the decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*, returning only the bytes (error policy: self.errors)."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*, returning only the text (error policy: self.errors)."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for mac-romanian; all encoding is inherited from Codec."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for mac-romanian; all decoding is inherited from Codec."""
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register the mac-romanian codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-romanian',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# Bytes 0x00-0x7F of the Mac Romanian code page are identical to ASCII, so
# only the high half needs to be spelled out.  The string below lists the
# Unicode characters for bytes 0x80-0xFF, eight code points per source line
# (first line covers 0x80-0x87, second 0x88-0x8F, and so on), taken from
# 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT'.
_HIGH_HALF = (
    '\xc4\xc5\xc7\xc9\xd1\xd6\xdc\xe1'                    # 0x80-0x87
    '\xe0\xe2\xe4\xe3\xe5\xe7\xe9\xe8'                    # 0x88-0x8F
    '\xea\xeb\xed\xec\xee\xef\xf1\xf3'                    # 0x90-0x97
    '\xf2\xf4\xf6\xf5\xfa\xf9\xfb\xfc'                    # 0x98-0x9F
    '\u2020\xb0\xa2\xa3\xa7\u2022\xb6\xdf'                # 0xA0-0xA7
    '\xae\xa9\u2122\xb4\xa8\u2260\u0102\u0218'            # 0xA8-0xAF
    '\u221e\xb1\u2264\u2265\xa5\xb5\u2202\u2211'          # 0xB0-0xB7
    '\u220f\u03c0\u222b\xaa\xba\u03a9\u0103\u0219'        # 0xB8-0xBF
    '\xbf\xa1\xac\u221a\u0192\u2248\u2206\xab'            # 0xC0-0xC7
    '\xbb\u2026\xa0\xc0\xc3\xd5\u0152\u0153'              # 0xC8-0xCF
    '\u2013\u2014\u201c\u201d\u2018\u2019\xf7\u25ca'      # 0xD0-0xD7
    '\xff\u0178\u2044\u20ac\u2039\u203a\u021a\u021b'      # 0xD8-0xDF
    '\u2021\xb7\u201a\u201e\u2030\xc2\xca\xc1'            # 0xE0-0xE7
    '\xcb\xc8\xcd\xce\xcf\xcc\xd3\xd4'                    # 0xE8-0xEF
    '\uf8ff\xd2\xda\xdb\xd9\u0131\u02c6\u02dc'            # 0xF0-0xF7
    '\xaf\u02d8\u02d9\u02da\xb8\u02dd\u02db\u02c7'        # 0xF8-0xFF
)

# 256-entry map: byte value -> Unicode character.
decoding_table = ''.join(map(chr, range(0x80))) + _HIGH_HALF

### Encoding table

# Inverse mapping built by the codecs machinery from the decoding table.
encoding_table = codecs.charmap_build(decoding_table)
| bsd-3-clause | 4dd1443b29e7cfab8bc99774c7118433 | 43.498371 | 118 | 0.52742 | 3.232585 | false | false | false | false |
brython-dev/brython | www/src/Lib/codeop.py | 1 | 5520 | r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
import warnings
# Every __future__ feature object; Compile instances check compiled code
# objects against these so later compilations "remember" future statements.
_features = [getattr(__future__, fname)
             for fname in __future__.all_feature_names]
# Public API of this module.
__all__ = ["compile_command", "Compile", "CommandCompiler"]
# The following flags match the values from Include/cpython/compile.h
# Caveat emptor: These flags are undocumented on purpose and depending
# on their effect outside the standard library is **unsupported**.
PyCF_DONT_IMPLY_DEDENT = 0x200
PyCF_ALLOW_INCOMPLETE_INPUT = 0x4000
def _maybe_compile(compiler, source, filename, symbol):
    """Compile *source* with *compiler*, returning None if it is incomplete.

    *compiler* is called as ``compiler(source, filename, symbol)``.  A first
    attempt compiles *source* as-is; if that succeeds, the code object is
    returned, and non-SyntaxError exceptions (ValueError, OverflowError)
    propagate.  On SyntaxError a second attempt with a trailing newline is
    made under warnings-as-errors: a "incomplete input" SyntaxError means
    the command is unfinished (return None), any other SyntaxError is
    re-raised, and a clean second compile also falls through to None.
    """
    # Check for source consisting of only blank lines and comments.
    for line in source.split("\n"):
        line = line.strip()
        if line and line[0] != '#':
            break # Leave it alone.
    else:
        # Comment/blank-only input: compile a no-op instead (except in
        # "eval" mode, where an empty expression must stay a syntax error).
        if symbol != "eval":
            source = "pass" # Replace it with a 'pass' statement
    try:
        return compiler(source, filename, symbol)
    except SyntaxError: # Let other compile() errors propagate.
        pass
    # Catch syntax warnings after the first compile
    # to emit warnings (SyntaxWarning, DeprecationWarning) at most once.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        try:
            compiler(source + "\n", filename, symbol)
        except SyntaxError as e:
            if "incomplete input" in str(e):
                return None
            raise
def _is_syntax_error(err1, err2):
rep1 = repr(err1)
rep2 = repr(err2)
if "was never closed" in rep1 and "was never closed" in rep2:
return False
if rep1 == rep2:
return True
return False
def _compile(source, filename, symbol):
    """Compile *source* with the flags that allow incomplete interactive input."""
    flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT
    return compile(source, filename, symbol, flags)
def compile_command(source, filename="<input>", symbol="single"):
    r"""Try to compile a single (possibly incomplete) command.

    Parameters:
      source   -- source text; may contain ``\n`` characters
      filename -- name reported in tracebacks (default ``"<input>"``)
      symbol   -- grammar start symbol: ``"single"`` (default), ``"exec"``
                  or ``"eval"``

    Returns a code object when *source* is a complete, valid command, and
    ``None`` when it is incomplete.  Raises SyntaxError, ValueError or
    OverflowError when the command is a syntax error (the latter two can be
    produced by malformed literals).
    """
    return _maybe_compile(_compile, source, filename, symbol)
class Compile:
    """A compile() work-alike that remembers __future__ statements.

    Once text containing a __future__ statement has been compiled through an
    instance, every later compilation by that instance keeps the feature in
    force.
    """

    def __init__(self):
        # Start from the flags _compile() uses; features accumulate on top.
        self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT

    def __call__(self, source, filename, symbol):
        code = compile(source, filename, symbol, self.flags, True)
        # Remember any __future__ features the compiled code turned on.
        for feature in _features:
            flag = feature.compiler_flag
            if code.co_flags & flag:
                self.flags |= flag
        return code
class CommandCompiler:
    """compile_command() work-alike that remembers __future__ statements.

    Instances have __call__ methods identical in signature to
    compile_command(); the difference is that after compiling text containing
    a __future__ statement, the instance compiles all subsequent program
    texts with the statement in force.
    """

    def __init__(self):
        # Per-instance Compile object carries the accumulated future flags.
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Try to compile a single (possibly incomplete) command.

        Parameters:
          source   -- source text; may contain ``\n`` characters
          filename -- name reported in tracebacks (default ``"<input>"``)
          symbol   -- grammar start symbol: ``"single"`` (default) or
                      ``"eval"``

        Returns a code object when *source* is a complete, valid command,
        and ``None`` when it is incomplete.  Raises SyntaxError, ValueError
        or OverflowError when the command is a syntax error (the latter two
        can be produced by malformed literals).
        """
        return _maybe_compile(self.compiler, source, filename, symbol)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.